patch (string, 17-31.2k chars) | y (int64, 1-1) | oldf (string, 0-2.21M chars) | idx (int64, 1-1) | id (int64, 4.29k-68.4k) | msg (string, 8-843 chars) | proj (212 classes) | lang (9 classes) |
---|---|---|---|---|---|---|---|
@@ -36,8 +36,8 @@ import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec.QueryType;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.store.SmartStore.SmartStoreException;
-import com.salesforce.androidsdk.smartstore.store.StoreCursor;
import com.salesforce.androidsdk.smartstore.store.SoupSpec;
+import com.salesforce.androidsdk.smartstore.store.StoreCursor;
import com.salesforce.androidsdk.smartstore.ui.SmartStoreInspectorActivity;
import net.sqlcipher.database.SQLiteDatabase; | 1 | /*
* Copyright (c) 2011-2015, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.phonegap.plugin;
import android.app.Activity;
import android.util.Log;
import android.util.SparseArray;
import com.salesforce.androidsdk.smartstore.app.SmartStoreSDKManager;
import com.salesforce.androidsdk.smartstore.store.IndexSpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec.QueryType;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.store.SmartStore.SmartStoreException;
import com.salesforce.androidsdk.smartstore.store.StoreCursor;
import com.salesforce.androidsdk.smartstore.store.SoupSpec;
import com.salesforce.androidsdk.smartstore.ui.SmartStoreInspectorActivity;
import net.sqlcipher.database.SQLiteDatabase;
import org.apache.cordova.CallbackContext;
import org.apache.cordova.PluginResult;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* PhoneGap plugin for smart store.
*/
public class SmartStorePlugin extends ForcePlugin {
// Keys in json from/to javascript
public static final String BEGIN_KEY = "beginKey";
public static final String END_KEY = "endKey";
public static final String INDEX_PATH = "indexPath";
public static final String LIKE_KEY = "likeKey";
public static final String MATCH_KEY = "matchKey";
public static final String SMART_SQL = "smartSql";
public static final String ORDER_PATH = "orderPath";
public static final String ORDER = "order";
public static final String PAGE_SIZE = "pageSize";
public static final String QUERY_TYPE = "queryType";
private static final String SOUP_SPEC = "soupSpec";
private static final String SOUP_SPEC_NAME = "name";
private static final String SOUP_SPEC_FEATURES = "features";
static final String TOTAL_ENTRIES = "totalEntries";
static final String TOTAL_PAGES = "totalPages";
static final String RE_INDEX_DATA = "reIndexData";
static final String CURRENT_PAGE_INDEX = "currentPageIndex";
static final String CURRENT_PAGE_ORDERED_ENTRIES = "currentPageOrderedEntries";
static final String CURSOR_ID = "cursorId";
private static final String TYPE = "type";
private static final String SOUP_NAME = "soupName";
private static final String PATH = "path";
private static final String PATHS = "paths";
private static final String QUERY_SPEC = "querySpec";
private static final String EXTERNAL_ID_PATH = "externalIdPath";
private static final String ENTRIES = "entries";
private static final String ENTRY_IDS = "entryIds";
private static final String INDEX = "index";
private static final String INDEXES = "indexes";
private static final String IS_GLOBAL_STORE = "isGlobalStore";
// Map of cursor id to StoreCursor, per database.
private static Map<SQLiteDatabase, SparseArray<StoreCursor>> STORE_CURSORS = new HashMap<SQLiteDatabase, SparseArray<StoreCursor>>();
private synchronized static SparseArray<StoreCursor> getSmartStoreCursors(SmartStore store) {
final SQLiteDatabase db = store.getDatabase();
if (!STORE_CURSORS.containsKey(db)) {
STORE_CURSORS.put(db, new SparseArray<StoreCursor>());
}
return STORE_CURSORS.get(db);
}
/**
* Supported plugin actions that the client can take.
*/
enum Action {
pgAlterSoup,
pgClearSoup,
pgCloseCursor,
pgGetDatabaseSize,
pgGetSoupIndexSpecs,
pgGetSoupSpec,
pgMoveCursorToPageIndex,
pgQuerySoup,
pgRegisterSoup,
pgReIndexSoup,
pgRemoveFromSoup,
pgRemoveSoup,
pgRetrieveSoupEntries,
pgRunSmartQuery,
pgShowInspector,
pgSoupExists,
pgUpsertSoupEntries
}
@Override
public boolean execute(String actionStr, JavaScriptPluginVersion jsVersion,
final JSONArray args, final CallbackContext callbackContext) throws JSONException {
final long start = System.currentTimeMillis();
// Figure out action
final Action action;
try {
action = Action.valueOf(actionStr);
} catch (IllegalArgumentException e) {
Log.e("SmartStorePlugin.execute", "Unknown action " + actionStr);
return false;
}
// Not running smartstore action on the main thread
cordova.getThreadPool().execute(new Runnable() {
@Override
public void run() {
// All smart store action need to be serialized
synchronized (SmartStorePlugin.class) {
try {
switch (action) {
case pgAlterSoup: alterSoup(args, callbackContext); break;
case pgClearSoup: clearSoup(args, callbackContext); break;
case pgCloseCursor: closeCursor(args, callbackContext); break;
case pgGetDatabaseSize: getDatabaseSize(args, callbackContext); break;
case pgGetSoupIndexSpecs: getSoupIndexSpecs(args, callbackContext); break;
case pgGetSoupSpec: getSoupSpec(args, callbackContext); break;
case pgMoveCursorToPageIndex: moveCursorToPageIndex(args, callbackContext); break;
case pgQuerySoup: querySoup(args, callbackContext); break;
case pgRegisterSoup: registerSoup(args, callbackContext); break;
case pgReIndexSoup: reIndexSoup(args, callbackContext); break;
case pgRemoveFromSoup: removeFromSoup(args, callbackContext); break;
case pgRemoveSoup: removeSoup(args, callbackContext); break;
case pgRetrieveSoupEntries: retrieveSoupEntries(args, callbackContext); break;
case pgRunSmartQuery: runSmartQuery(args, callbackContext); break;
case pgShowInspector: showInspector(args, callbackContext); break;
case pgSoupExists: soupExists(args, callbackContext); break;
case pgUpsertSoupEntries: upsertSoupEntries(args, callbackContext); break;
default: throw new SmartStoreException("No handler for action " + action);
}
} catch (Exception e) {
Log.w("SmartStorePlugin.execute", e.getMessage(), e);
callbackContext.error(e.getMessage());
}
Log.d("SmartSTorePlugin.execute", "Total time for " + action + "->" + (System.currentTimeMillis() - start));
}
}
});
Log.d("SmartSTorePlugin.execute", "Main thread time for " + action + "->" + (System.currentTimeMillis() - start));
return true;
}
/**
* Native implementation of pgRemoveFromSoup
* @param args
* @param callbackContext
* @throws JSONException
*/
private void removeFromSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
JSONArray jsonSoupEntryIds = arg0.optJSONArray(ENTRY_IDS);
JSONObject querySpecJson = arg0.optJSONObject(QUERY_SPEC);
if (jsonSoupEntryIds != null) {
Long[] soupEntryIds = new Long[jsonSoupEntryIds.length()];
for (int i = 0; i < jsonSoupEntryIds.length(); i++) {
soupEntryIds[i] = jsonSoupEntryIds.getLong(i);
}
// Run remove
smartStore.delete(soupName, soupEntryIds);
}
else {
QuerySpec querySpec = QuerySpec.fromJSON(soupName, querySpecJson);
// Run remove
smartStore.deleteByQuery(soupName, querySpec);
}
callbackContext.success();
}
/**
* Native implementation of pgRetrieveSoupEntries
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void retrieveSoupEntries(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
JSONArray jsonSoupEntryIds = arg0.getJSONArray(ENTRY_IDS);
Long[] soupEntryIds = new Long[jsonSoupEntryIds.length()];
for (int i = 0; i < jsonSoupEntryIds.length(); i++) {
soupEntryIds[i] = jsonSoupEntryIds.getLong(i);
}
// Run retrieve
JSONArray result = smartStore.retrieve(soupName, soupEntryIds);
PluginResult pluginResult = new PluginResult(PluginResult.Status.OK, result);
callbackContext.sendPluginResult(pluginResult);
}
/**
* Native implementation of pgCloseCursor
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void closeCursor(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
Integer cursorId = arg0.getInt(CURSOR_ID);
final SmartStore smartStore = getSmartStore(arg0);
// Drop cursor from storeCursors map
getSmartStoreCursors(smartStore).remove(cursorId);
callbackContext.success();
}
/**
* Native implementation of pgMoveCursorToPageIndex
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void moveCursorToPageIndex(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
Integer cursorId = arg0.getInt(CURSOR_ID);
Integer index = arg0.getInt(INDEX);
final SmartStore smartStore = getSmartStore(arg0);
// Get cursor
final StoreCursor storeCursor = getSmartStoreCursors(smartStore).get(cursorId);
if (storeCursor == null) {
callbackContext.error("Invalid cursor id");
}
// Change page
storeCursor.moveToPageIndex(index);
// Build json result
JSONObject result = storeCursor.getData(smartStore);
// Done
callbackContext.success(result);
}
/**
* Native implementation of pgShowInspector
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void showInspector(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
boolean isGlobal = getIsGlobal(arg0);
Activity activity = cordova.getActivity();
activity.startActivity(SmartStoreInspectorActivity.getIntent(activity, isGlobal, null));
}
/**
* Native implementation of pgSoupExists
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void soupExists(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
// Run upsert
boolean exists = smartStore.hasSoup(soupName);
PluginResult pluginResult = new PluginResult(PluginResult.Status.OK, exists);
callbackContext.sendPluginResult(pluginResult);
}
/**
* Native implementation of pgUpsertSoupEntries
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void upsertSoupEntries(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
JSONArray entriesJson = arg0.getJSONArray(ENTRIES);
String externalIdPath = arg0.getString(EXTERNAL_ID_PATH);
List<JSONObject> entries = new ArrayList<JSONObject>();
for (int i = 0; i < entriesJson.length(); i++) {
entries.add(entriesJson.getJSONObject(i));
}
// Run upsert
synchronized(smartStore.getDatabase()) {
smartStore.beginTransaction();
try {
JSONArray results = new JSONArray();
for (JSONObject entry : entries) {
results.put(smartStore.upsert(soupName, entry, externalIdPath, false));
}
smartStore.setTransactionSuccessful();
PluginResult pluginResult = new PluginResult(PluginResult.Status.OK, results);
callbackContext.sendPluginResult(pluginResult);
} finally {
smartStore.endTransaction();
}
}
}
/**
* Native implementation of pgRegisterSoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void registerSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.isNull(SOUP_NAME) ? null : arg0.getString(SOUP_NAME);
JSONArray indexesJson = arg0.getJSONArray(INDEXES);
IndexSpec[] indexSpecs = IndexSpec.fromJSON(indexesJson);
final SmartStore smartStore = getSmartStore(arg0);
// Get soup spec if available
JSONObject soupSpecObj = arg0.optJSONObject(SOUP_SPEC);
if (soupSpecObj != null) {
// Get soup name
soupName = soupSpecObj.getString(SOUP_SPEC_NAME);
// Get features
JSONArray featuresJson = soupSpecObj.optJSONArray(SOUP_SPEC_FEATURES);
if (featuresJson == null) {
featuresJson = new JSONArray();
}
String[] features = new String[featuresJson.length()];
for (int i = 0; i < featuresJson.length(); i++) {
features[i] = featuresJson.getString(i);
}
// Run register soup with spec
smartStore.registerSoupWithSpec(new SoupSpec(soupName, features), indexSpecs);
} else {
// Run register soup
smartStore.registerSoup(soupName, indexSpecs);
}
callbackContext.success(soupName);
}
/**
* Native implementation of pgQuerySoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void querySoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
JSONObject querySpecJson = arg0.getJSONObject(QUERY_SPEC);
QuerySpec querySpec = QuerySpec.fromJSON(soupName, querySpecJson);
if (querySpec.queryType == QueryType.smart) {
throw new RuntimeException("Smart queries can only be run through runSmartQuery");
}
// Run query
runQuery(smartStore, querySpec, callbackContext);
}
/**
* Native implementation of pgRunSmartSql
* @param args
* @param callbackContext
*/
private void runSmartQuery(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
JSONObject querySpecJson = arg0.getJSONObject(QUERY_SPEC);
final SmartStore smartStore = getSmartStore(arg0);
QuerySpec querySpec = QuerySpec.fromJSON(null, querySpecJson);
if (querySpec.queryType != QueryType.smart) {
throw new RuntimeException("runSmartQuery can only run smart queries");
}
// Run query
runQuery(smartStore, querySpec, callbackContext);
}
/**
* Helper for querySoup and runSmartSql
* @param querySpec
* @param callbackContext
* @throws JSONException
*/
private void runQuery(SmartStore smartStore, QuerySpec querySpec,
CallbackContext callbackContext) throws JSONException {
// Build store cursor
final StoreCursor storeCursor = new StoreCursor(smartStore, querySpec);
getSmartStoreCursors(smartStore).put(storeCursor.cursorId, storeCursor);
// Build json result
JSONObject result = storeCursor.getData(smartStore);
// Done
callbackContext.success(result);
}
/**
* Native implementation of pgRemoveSoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void removeSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
// Run remove
smartStore.dropSoup(soupName);
callbackContext.success();
}
/**
* Native implementation of pgClearSoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void clearSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
// Run clear
smartStore.clearSoup(soupName);
callbackContext.success();
}
/**
* Native implementation of pgGetDatabaseSize
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void getDatabaseSize(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
final JSONObject arg0 = args.optJSONObject(0);
final SmartStore smartStore = getSmartStore(arg0);
int databaseSize = smartStore.getDatabaseSize();
callbackContext.success(databaseSize);
}
/**
* Native implementation of pgAlterSoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void alterSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
List<IndexSpec> indexSpecs = new ArrayList<IndexSpec>();
JSONArray indexesJson = arg0.getJSONArray(INDEXES);
for (int i = 0; i < indexesJson.length(); i++) {
JSONObject indexJson = indexesJson.getJSONObject(i);
indexSpecs.add(new IndexSpec(indexJson.getString(PATH), SmartStore.Type.valueOf(indexJson.getString(TYPE))));
}
boolean reIndexData = arg0.getBoolean(RE_INDEX_DATA);
// Run register
smartStore.alterSoup(soupName, indexSpecs.toArray(new IndexSpec[0]), reIndexData);
callbackContext.success(soupName);
}
/**
* Native implementation of pgReIndexSoup
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void reIndexSoup(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
List<String> indexPaths = new ArrayList<String>();
JSONArray indexPathsJson = arg0.getJSONArray(PATHS);
for (int i = 0; i < indexPathsJson.length(); i++) {
indexPaths.add(indexPathsJson.getString(i));
}
// Run register
smartStore.reIndexSoup(soupName, indexPaths.toArray(new String[0]), true);
callbackContext.success(soupName);
}
/**
* Native implementation of pgGetSoupIndexSpecs
* @param args
* @param callbackContext
* @return
* @throws JSONException
*/
private void getSoupIndexSpecs(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
final SmartStore smartStore = getSmartStore(arg0);
// Get soup index specs
IndexSpec[] indexSpecs = smartStore.getSoupIndexSpecs(soupName);
JSONArray indexSpecsJson = new JSONArray();
for (int i = 0; i < indexSpecs.length; i++) {
JSONObject indexSpecJson = new JSONObject();
IndexSpec indexSpec = indexSpecs[i];
indexSpecJson.put(PATH, indexSpec.path);
indexSpecJson.put(TYPE, indexSpec.type);
indexSpecsJson.put(indexSpecJson);
}
callbackContext.success(indexSpecsJson);
}
/**
* Native implementation of pgGetSoupSpec
* @param args
* @param callbackContext
* @throws JSONException
*/
private void getSoupSpec(JSONArray args, CallbackContext callbackContext) throws JSONException {
// Parse args
JSONObject arg0 = args.getJSONObject(0);
String soupName = arg0.getString(SOUP_NAME);
// Get soup specs
SmartStore smartStore = getSmartStore(arg0);
SoupSpec soupSpec = smartStore.getSoupSpec(soupName);
callbackContext.success(soupSpec.toJSON());
}
/**
* Return smartstore to use
* @param arg0 first argument passed in plugin call
* @return
*/
private SmartStore getSmartStore(JSONObject arg0) {
boolean isGlobal = getIsGlobal(arg0);
return (isGlobal
? SmartStoreSDKManager.getInstance().getGlobalSmartStore()
: SmartStoreSDKManager.getInstance().getSmartStore());
}
/**
* Return the value of the isGlobalStore argument
* @param arg0
* @return
*/
private boolean getIsGlobal(JSONObject arg0) {
return arg0 != null ? arg0.optBoolean(IS_GLOBAL_STORE, false) : false;
}
}
| 1 | 15,251 | Took the opportunity to fix the import ordering. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -130,9 +130,13 @@ class XRDCalculator(AbstractDiffractionPatternCalculator):
"""
if isinstance(wavelength, float):
self.wavelength = wavelength
- else:
+ elif isinstance(wavelength, int):
+ self.wavelength = float(wavelength)
+ elif isinstance(wavelength, str):
self.radiation = wavelength
self.wavelength = WAVELENGTHS[wavelength]
+ else:
+ raise TypeError("'wavelength' must be either of: float, int or str")
self.symprec = symprec
self.debye_waller_factors = debye_waller_factors or {}
| 1 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements an XRD pattern calculator.
"""
import json
import os
from math import asin, cos, degrees, pi, radians, sin
import numpy as np
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from .core import (
AbstractDiffractionPatternCalculator,
DiffractionPattern,
get_unique_families,
)
# XRD wavelengths in angstroms
WAVELENGTHS = {
"CuKa": 1.54184,
"CuKa2": 1.54439,
"CuKa1": 1.54056,
"CuKb1": 1.39222,
"MoKa": 0.71073,
"MoKa2": 0.71359,
"MoKa1": 0.70930,
"MoKb1": 0.63229,
"CrKa": 2.29100,
"CrKa2": 2.29361,
"CrKa1": 2.28970,
"CrKb1": 2.08487,
"FeKa": 1.93735,
"FeKa2": 1.93998,
"FeKa1": 1.93604,
"FeKb1": 1.75661,
"CoKa": 1.79026,
"CoKa2": 1.79285,
"CoKa1": 1.78896,
"CoKb1": 1.63079,
"AgKa": 0.560885,
"AgKa2": 0.563813,
"AgKa1": 0.559421,
"AgKb1": 0.497082,
}
with open(os.path.join(os.path.dirname(__file__), "atomic_scattering_params.json")) as f:
ATOMIC_SCATTERING_PARAMS = json.load(f)
class XRDCalculator(AbstractDiffractionPatternCalculator):
r"""
Computes the XRD pattern of a crystal structure.
This code is implemented by Shyue Ping Ong as part of UCSD's NANO106 -
Crystallography of Materials. The formalism for this code is based on
that given in Chapters 11 and 12 of Structure of Materials by Marc De
Graef and Michael E. McHenry. This takes into account the atomic
scattering factors and the Lorentz polarization factor, but not
the Debye-Waller (temperature) factor (for which data is typically not
available). Note that the multiplicity correction is not needed since
this code simply goes through all reciprocal points within the limiting
sphere, which includes all symmetrically equivalent facets. The algorithm
is as follows
1. Calculate reciprocal lattice of structure. Find all reciprocal points
within the limiting sphere given by :math:`\\frac{2}{\\lambda}`.
2. For each reciprocal point :math:`\\mathbf{g_{hkl}}` corresponding to
lattice plane :math:`(hkl)`, compute the Bragg condition
:math:`\\sin(\\theta) = \\frac{\\lambda}{2d_{hkl}}`
3. Compute the structure factor as the sum of the atomic scattering
factors. The atomic scattering factors are given by
.. math::
f(s) = Z - 41.78214 \\times s^2 \\times \\sum\\limits_{i=1}^n a_i \
\\exp(-b_is^2)
where :math:`s = \\frac{\\sin(\\theta)}{\\lambda}` and :math:`a_i`
and :math:`b_i` are the fitted parameters for each element. The
structure factor is then given by
.. math::
F_{hkl} = \\sum\\limits_{j=1}^N f_j \\exp(2\\pi i \\mathbf{g_{hkl}}
\\cdot \\mathbf{r})
4. The intensity is then given by the modulus square of the structure
factor.
.. math::
I_{hkl} = F_{hkl}F_{hkl}^*
5. Finally, the Lorentz polarization correction factor is applied. This
factor is given by:
.. math::
P(\\theta) = \\frac{1 + \\cos^2(2\\theta)}
{\\sin^2(\\theta)\\cos(\\theta)}
"""
# Tuple of available radiation keywords.
AVAILABLE_RADIATION = tuple(WAVELENGTHS.keys())
def __init__(self, wavelength="CuKa", symprec=0, debye_waller_factors=None):
"""
Initializes the XRD calculator with a given radiation.
Args:
wavelength (str/float): The wavelength can be specified as either a
float or a string. If it is a string, it must be one of the
supported definitions in the AVAILABLE_RADIATION class
variable, which provides useful commonly used wavelengths.
If it is a float, it is interpreted as a wavelength in
angstroms. Defaults to "CuKa", i.e, Cu K_alpha radiation.
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
"""
if isinstance(wavelength, float):
self.wavelength = wavelength
else:
self.radiation = wavelength
self.wavelength = WAVELENGTHS[wavelength]
self.symprec = symprec
self.debye_waller_factors = debye_waller_factors or {}
def get_pattern(self, structure, scaled=True, two_theta_range=(0, 90)):
"""
Calculates the diffraction pattern for a structure.
Args:
structure (Structure): Input structure
scaled (bool): Whether to return scaled intensities. The maximum
peak is set to a value of 100. Defaults to True. Use False if
you need the absolute values to combine XRD plots.
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
Returns:
(XRDPattern)
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
wavelength = self.wavelength
latt = structure.lattice
is_hex = latt.is_hexagonal()
# Obtained from Bragg condition. Note that reciprocal lattice
# vector length is 1 / d_hkl.
min_r, max_r = (
(0, 2 / wavelength)
if two_theta_range is None
else [2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]
)
# Obtain crystallographic reciprocal lattice points within range
recip_latt = latt.reciprocal_lattice_crystallographic
recip_pts = recip_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], max_r)
if min_r:
recip_pts = [pt for pt in recip_pts if pt[1] >= min_r]
# Create a flattened array of zs, coeffs, fcoords and occus. This is
# used to perform vectorized computation of atomic scattering factors
# later. Note that these are not necessarily the same size as the
# structure as each partially occupied specie occupies its own
# position in the flattened array.
zs = []
coeffs = []
fcoords = []
occus = []
dwfactors = []
for site in structure:
for sp, occu in site.species.items():
zs.append(sp.Z)
try:
c = ATOMIC_SCATTERING_PARAMS[sp.symbol]
except KeyError:
raise ValueError(
"Unable to calculate XRD pattern as "
"there is no scattering coefficients for"
" %s." % sp.symbol
)
coeffs.append(c)
dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
fcoords.append(site.frac_coords)
occus.append(occu)
zs = np.array(zs)
coeffs = np.array(coeffs)
fcoords = np.array(fcoords)
occus = np.array(occus)
dwfactors = np.array(dwfactors)
peaks = {}
two_thetas = []
for hkl, g_hkl, ind, _ in sorted(recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
# Force miller indices to be integers.
hkl = [int(round(i)) for i in hkl]
if g_hkl != 0:
d_hkl = 1 / g_hkl
# Bragg condition
theta = asin(wavelength * g_hkl / 2)
# s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d =
# 1/|ghkl|)
s = g_hkl / 2
# Store s^2 since we are using it a few times.
s2 = s ** 2
# Vectorized computation of g.r for all fractional coords and
# hkl.
g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]
# Highly vectorized computation of atomic scattering factors.
# Equivalent non-vectorized code is::
#
# for site in structure:
# el = site.specie
# coeff = ATOMIC_SCATTERING_PARAMS[el.symbol]
# fs = el.Z - 41.78214 * s2 * sum(
# [d[0] * exp(-d[1] * s2) for d in coeff])
fs = zs - 41.78214 * s2 * np.sum(coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)
dw_correction = np.exp(-dwfactors * s2)
# Structure factor = sum of atomic scattering factors (with
# position factor exp(2j * pi * g.r and occupancies).
# Vectorized computation.
f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r) * dw_correction)
# Lorentz polarization correction for hkl
lorentz_factor = (1 + cos(2 * theta) ** 2) / (sin(theta) ** 2 * cos(theta))
# Intensity for hkl is modulus square of structure factor.
i_hkl = (f_hkl * f_hkl.conjugate()).real
two_theta = degrees(2 * theta)
if is_hex:
# Use Miller-Bravais indices for hexagonal lattices.
hkl = (hkl[0], hkl[1], -hkl[0] - hkl[1], hkl[2])
# Deal with floating point precision issues.
ind = np.where(
np.abs(np.subtract(two_thetas, two_theta)) < AbstractDiffractionPatternCalculator.TWO_THETA_TOL
)
if len(ind[0]) > 0:
peaks[two_thetas[ind[0][0]]][0] += i_hkl * lorentz_factor
peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))
else:
peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)], d_hkl]
two_thetas.append(two_theta)
# Scale intensities so that the max intensity is 100.
max_intensity = max([v[0] for v in peaks.values()])
x = []
y = []
hkls = []
d_hkls = []
for k in sorted(peaks.keys()):
v = peaks[k]
fam = get_unique_families(v[1])
if v[0] / max_intensity * 100 > AbstractDiffractionPatternCalculator.SCALED_INTENSITY_TOL:
x.append(k)
y.append(v[0])
hkls.append([{"hkl": hkl, "multiplicity": mult} for hkl, mult in fam.items()])
d_hkls.append(v[2])
xrd = DiffractionPattern(x, y, hkls, d_hkls)
if scaled:
xrd.normalize(mode="max", value=100)
return xrd
| 1 | 19,647 | I think int and float can be handled in one if statement. The subsequent calculations don't really care whether it is a float or an int. | materialsproject-pymatgen | py |
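Below is a minimal, hypothetical sketch of the reviewer's suggestion for the patch above: `int` and `float` are accepted by a single `isinstance` check, since the downstream calculations do not care which numeric type was passed. The helper name `resolve_wavelength` and the trimmed `WAVELENGTHS` table are illustrative only and are not part of the pymatgen code.

```python
# Illustrative only: the real logic lives in XRDCalculator.__init__ and uses the
# module-level WAVELENGTHS table; this trimmed copy keeps the sketch runnable.
WAVELENGTHS = {"CuKa": 1.54184, "MoKa": 0.71073}

def resolve_wavelength(wavelength):
    """Return the wavelength in angstroms from a number or a named radiation."""
    if isinstance(wavelength, (int, float)):
        # One branch for both numeric types, as suggested in the review.
        return float(wavelength)
    if isinstance(wavelength, str):
        return WAVELENGTHS[wavelength]
    raise TypeError("'wavelength' must be either of: float, int or str")

print(resolve_wavelength(2))       # 2.0
print(resolve_wavelength("CuKa"))  # 1.54184
```

With this shape, the explicit str branch for named radiations and the TypeError fallback from the patch could stay unchanged.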
@@ -134,14 +134,14 @@ func TestDelayedCancellationEnabled(t *testing.T) {
t.Parallel()
ctx, cancel := makeContextWithDelayedCancellation(t)
- EnableDelayedCancellationWithGracePeriod(ctx, 15*time.Millisecond)
+ EnableDelayedCancellationWithGracePeriod(ctx, 50*time.Millisecond)
cancel()
select {
case <-ctx.Done():
t.Fatalf("Cancellation is not delayed")
- case <-time.After(10 * time.Millisecond):
+ case <-time.After(20 * time.Millisecond):
}
<-ctx.Done() | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"testing"
"time"
"golang.org/x/net/context"
)
type testDCKeyType int
const (
testDCKey testDCKeyType = iota
)
func TestReplayableContext(t *testing.T) {
t.Parallel()
ctx := context.Background()
ctx = NewContextReplayable(ctx, func(ctx context.Context) context.Context {
return context.WithValue(ctx, testDCKey, "O_O")
})
ctx, cancel := context.WithCancel(ctx)
ctx, err := NewContextWithReplayFrom(ctx)
if err != nil {
t.Fatalf("calling NewContextWithReplayFrom error: %s", err)
}
// Test if replay was run properly
if ctx.Value(testDCKey) != "O_O" {
t.Fatalf("NewContextWithReplayFrom did not replay attached replayFunc")
}
// Test if cancellation is disabled
cancel()
select {
case <-ctx.Done():
t.Fatalf("NewContextWithReplayFrom did not disconnect the cancel function")
default:
}
// make sure the new ctx is also replayable
ctx, err = NewContextWithReplayFrom(ctx)
if err != nil {
t.Fatalf("calling NewContextWithReplayFrom error: %s", err)
}
if ctx.Value(testDCKey) != "O_O" {
t.Fatalf("NewContextWithReplayFrom did not replay attached replayFunc")
}
}
func makeContextWithDelayedCancellation(t *testing.T) (
ctx context.Context, originalCancel context.CancelFunc) {
ctx = context.Background()
ctx = NewContextReplayable(ctx, func(ctx context.Context) context.Context {
return context.WithValue(ctx, testDCKey, "O_O")
})
ctx, cancel := context.WithCancel(ctx)
ctx, err := NewContextWithCancellationDelayer(ctx)
if err != nil {
t.Fatalf("calling NewContextWithCancellationDelayer error: %s", err)
}
// Test NewContextWithCancellationDelayer does replay properly
if ctx.Value(testDCKey) != "O_O" {
t.Fatalf(
"NewContextWithCancellationDelayer did not replay attached replayFunc")
}
return ctx, cancel
}
func TestDelayedCancellationCancelWhileNotEnabled(t *testing.T) {
t.Parallel()
ctx, cancel := makeContextWithDelayedCancellation(t)
cancel()
select {
case <-ctx.Done():
case <-time.After(100 * time.Millisecond):
t.Fatalf("Cancellation did not happen even though " +
"EnableDelayedCancellationWithGracePeriod has not been called yet")
}
}
func TestDelayedCancellationCleanupWhileNotEnabled(t *testing.T) {
t.Parallel()
ctx, _ := makeContextWithDelayedCancellation(t)
if err := CleanupCancellationDelayer(ctx); err != nil {
t.Fatalf("calling CleanupCancellationDelayer error: %s", err)
}
select {
case <-ctx.Done():
case <-time.After(100 * time.Millisecond):
t.Fatalf("Cancellation did not happen even though " +
"EnableDelayedCancellationWithGracePeriod has not been called yet")
}
}
func TestDelayedCancellationSecondEnable(t *testing.T) {
t.Parallel()
ctx, cancel := makeContextWithDelayedCancellation(t)
defer cancel()
err := EnableDelayedCancellationWithGracePeriod(ctx, 0)
if err != nil {
t.Fatalf("1st EnableDelayedCancellationWithGracePeriod failed: %v", err)
}
cancel()
<-ctx.Done()
// parent context is not canceled; second "enable" should succeed even it's
// after grace period
err = EnableDelayedCancellationWithGracePeriod(ctx, 0)
if err == nil {
t.Fatalf("2nd EnableDelayedCancellationWithGracePeriod succeeded even " +
"though more than grace period has passed since parent context was " +
"canceled")
}
}
func TestDelayedCancellationEnabled(t *testing.T) {
t.Parallel()
ctx, cancel := makeContextWithDelayedCancellation(t)
EnableDelayedCancellationWithGracePeriod(ctx, 15*time.Millisecond)
cancel()
select {
case <-ctx.Done():
t.Fatalf("Cancellation is not delayed")
case <-time.After(10 * time.Millisecond):
}
<-ctx.Done()
// if test timeouts, then it's a failure: Cancellation did not happen after
// grace period
}
| 1 | 16,951 | Can we keep this at 10 to reduce the probability of a flake? Or are you afraid this would be too likely to give a false positive if delayed cancellation is ever actually broken? | keybase-kbfs | go |
@@ -15,9 +15,14 @@
package gcsblob
import (
+ "context"
+ "fmt"
+ "net/http"
"strings"
"testing"
+ "github.com/google/go-x-cloud/gcp"
+
"google.golang.org/api/googleapi"
)
| 1 | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsblob
import (
"strings"
"testing"
"google.golang.org/api/googleapi"
)
func TestValidateBucketChar(t *testing.T) {
t.Parallel()
tests := []struct {
name string
valid bool
}{
{"bucket-name", true},
{"8ucket_nam3", true},
{"bn", false},
{"_bucketname_", false},
{"bucketnameUpper", false},
{"bucketname?invalidchar", false},
}
for i, test := range tests {
err := validateBucketChar(test.name)
if test.valid && err != nil {
t.Errorf("%d) got %v, want nil", i, err)
} else if !test.valid && err == nil {
t.Errorf("%d) got nil, want invalid error", i)
}
}
}
func TestValidateObjectChar(t *testing.T) {
t.Parallel()
tests := []struct {
name string
valid bool
}{
{"object-name", true},
{"文件名", true},
{"ファイル名", true},
{"", false},
{"\xF4\x90\x80\x80", false},
{strings.Repeat("a", 1024), true},
{strings.Repeat("a", 1025), false},
{strings.Repeat("☺", 342), false},
}
for i, test := range tests {
err := validateObjectChar(test.name)
if test.valid && err != nil {
t.Errorf("%d) got %v, want nil", i, err)
} else if !test.valid && err == nil {
t.Errorf("%d) got nil, want invalid error", i)
}
}
}
func TestBufferSize(t *testing.T) {
t.Parallel()
tests := []struct {
size int
want int
}{
{
size: 5 * 1024 * 1024,
want: 5 * 1024 * 1024,
},
{
size: 0,
want: googleapi.DefaultUploadChunkSize,
},
{
size: -1024,
want: 0,
},
}
for i, test := range tests {
got := bufferSize(test.size)
if got != test.want {
t.Errorf("%d) got buffer size %d, want %d", i, got, test.want)
}
}
}
| 1 | 10,154 | nit: add a blank line under this. | google-go-cloud | go |
@@ -67,6 +67,7 @@ var (
bytecodeFlag = flag.NewStringVarP("bytecode", "b", "", "set the byte code")
yesFlag = flag.BoolVarP("assume-yes", "y", false, "answer yes for all confirmations")
passwordFlag = flag.NewStringVarP("password", "P", "", "input password for account")
+ chainIDFlag = flag.NewUint64VarP("chainID", "", 0, "set chainID for action")
)
// ActionCmd represents the action command | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package action
import (
"context"
"encoding/hex"
"fmt"
"math/big"
"strings"
"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/ioctl/cmd/account"
"github.com/iotexproject/iotex-core/ioctl/cmd/hdwallet"
"github.com/iotexproject/iotex-core/ioctl/config"
"github.com/iotexproject/iotex-core/ioctl/flag"
"github.com/iotexproject/iotex-core/ioctl/output"
"github.com/iotexproject/iotex-core/ioctl/util"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
)
// Multi-language support
var (
actionCmdShorts = map[config.Language]string{
config.English: "Manage actions of IoTeX blockchain",
config.Chinese: "管理IoTex区块链的行为", // this translation
}
actionCmdUses = map[config.Language]string{
config.English: "action",
config.Chinese: "action 行为", // this translation
}
flagActionEndPointUsages = map[config.Language]string{
config.English: "set endpoint for once",
config.Chinese: "一次设置端点", // this translation
}
flagActionInsecureUsages = map[config.Language]string{
config.English: "insecure connection for once",
config.Chinese: "一次不安全连接", // this translation
}
)
const defaultGasLimit = uint64(20000000)
// var defaultGasPrice = big.NewInt(unit.Qev)
// Flags
var (
gasLimitFlag = flag.NewUint64VarP("gas-limit", "l", defaultGasLimit, "set gas limit")
gasPriceFlag = flag.NewStringVarP("gas-price", "p", "1", "set gas price (unit: 10^(-6)IOTX), use suggested gas price if input is \"0\"")
nonceFlag = flag.NewUint64VarP("nonce", "n", 0, "set nonce (default using pending nonce)")
signerFlag = flag.NewStringVarP("signer", "s", "", "choose a signing account")
bytecodeFlag = flag.NewStringVarP("bytecode", "b", "", "set the byte code")
yesFlag = flag.BoolVarP("assume-yes", "y", false, "answer yes for all confirmations")
passwordFlag = flag.NewStringVarP("password", "P", "", "input password for account")
)
// ActionCmd represents the action command
var ActionCmd = &cobra.Command{
Use: config.TranslateInLang(actionCmdUses, config.UILanguage),
Short: config.TranslateInLang(actionCmdShorts, config.UILanguage),
}
type sendMessage struct {
Info string `json:"info"`
TxHash string `json:"txHash"`
URL string `json:"url"`
}
func (m *sendMessage) String() string {
if output.Format == "" {
return fmt.Sprintf("%s\nWait for several seconds and query this action by hash: %s", m.Info, m.URL)
}
return output.FormatString(output.Result, m)
}
func init() {
ActionCmd.AddCommand(actionHashCmd)
ActionCmd.AddCommand(actionTransferCmd)
ActionCmd.AddCommand(actionDeployCmd)
ActionCmd.AddCommand(actionInvokeCmd)
ActionCmd.AddCommand(actionReadCmd)
ActionCmd.AddCommand(actionClaimCmd)
ActionCmd.AddCommand(actionDepositCmd)
ActionCmd.AddCommand(actionSendRawCmd)
ActionCmd.PersistentFlags().StringVar(&config.ReadConfig.Endpoint, "endpoint",
config.ReadConfig.Endpoint, config.TranslateInLang(flagActionEndPointUsages,
config.UILanguage))
ActionCmd.PersistentFlags().BoolVar(&config.Insecure, "insecure", config.Insecure,
config.TranslateInLang(flagActionInsecureUsages, config.UILanguage))
}
func decodeBytecode() ([]byte, error) {
return hex.DecodeString(util.TrimHexPrefix(bytecodeFlag.Value().(string)))
}
// Signer returns signer's address
func Signer() (address string, err error) {
addressOrAlias := signerFlag.Value().(string)
if util.AliasIsHdwalletKey(addressOrAlias) {
return addressOrAlias, nil
}
if addressOrAlias == "" {
addressOrAlias, err = config.GetContextAddressOrAlias()
if err != nil {
return
}
}
return util.GetAddress(addressOrAlias)
}
func nonce(executor string) (uint64, error) {
if util.AliasIsHdwalletKey(executor) {
// for hdwallet key, get the nonce in SendAction()
return 0, nil
}
nonce := nonceFlag.Value().(uint64)
if nonce != 0 {
return nonce, nil
}
accountMeta, err := account.GetAccountMeta(executor)
if err != nil {
return 0, output.NewError(0, "failed to get account meta", err)
}
return accountMeta.PendingNonce, nil
}
// RegisterWriteCommand registers action flags for command
func RegisterWriteCommand(cmd *cobra.Command) {
gasLimitFlag.RegisterCommand(cmd)
gasPriceFlag.RegisterCommand(cmd)
signerFlag.RegisterCommand(cmd)
nonceFlag.RegisterCommand(cmd)
yesFlag.RegisterCommand(cmd)
passwordFlag.RegisterCommand(cmd)
}
// gasPriceInRau returns the suggest gas price
func gasPriceInRau() (*big.Int, error) {
if account.CryptoSm2 {
return big.NewInt(0), nil
}
gasPrice := gasPriceFlag.Value().(string)
if len(gasPrice) != 0 {
return util.StringToRau(gasPrice, util.GasPriceDecimalNum)
}
conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure)
if err != nil {
return nil, output.NewError(output.NetworkError, "failed to connect to endpoint", err)
}
defer conn.Close()
cli := iotexapi.NewAPIServiceClient(conn)
ctx := context.Background()
jwtMD, err := util.JwtAuth()
if err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
request := &iotexapi.SuggestGasPriceRequest{}
response, err := cli.SuggestGasPrice(ctx, request)
if err != nil {
sta, ok := status.FromError(err)
if ok {
return nil, output.NewError(output.APIError, sta.Message(), nil)
}
return nil, output.NewError(output.NetworkError, "failed to invoke SuggestGasPrice api", err)
}
return new(big.Int).SetUint64(response.GasPrice), nil
}
func fixGasLimit(caller string, execution *action.Execution) (*action.Execution, error) {
conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure)
if err != nil {
return nil, output.NewError(output.NetworkError, "failed to connect to endpoint", err)
}
defer conn.Close()
cli := iotexapi.NewAPIServiceClient(conn)
request := &iotexapi.EstimateActionGasConsumptionRequest{
Action: &iotexapi.EstimateActionGasConsumptionRequest_Execution{
Execution: execution.Proto(),
},
CallerAddress: caller,
}
ctx := context.Background()
jwtMD, err := util.JwtAuth()
if err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
res, err := cli.EstimateActionGasConsumption(ctx, request)
if err != nil {
sta, ok := status.FromError(err)
if ok {
return nil, output.NewError(output.APIError, sta.Message(), nil)
}
return nil, output.NewError(output.NetworkError,
"failed to invoke EstimateActionGasConsumption api", err)
}
return action.NewExecution(execution.Contract(), execution.Nonce(), execution.Amount(), res.Gas, execution.GasPrice(), execution.Data())
}
// SendRaw sends raw action to blockchain
func SendRaw(selp *iotextypes.Action) error {
conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure)
if err != nil {
return output.NewError(output.NetworkError, "failed to connect to endpoint", err)
}
defer conn.Close()
cli := iotexapi.NewAPIServiceClient(conn)
ctx := context.Background()
jwtMD, err := util.JwtAuth()
if err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
request := &iotexapi.SendActionRequest{Action: selp}
if _, err = cli.SendAction(ctx, request); err != nil {
if sta, ok := status.FromError(err); ok {
return output.NewError(output.APIError, sta.Message(), nil)
}
return output.NewError(output.NetworkError, "failed to invoke SendAction api", err)
}
shash := hash.Hash256b(byteutil.Must(proto.Marshal(selp)))
txhash := hex.EncodeToString(shash[:])
message := sendMessage{Info: "Action has been sent to blockchain.", TxHash: txhash, URL: "https://"}
switch config.ReadConfig.Explorer {
case "iotexscan":
if strings.Contains(config.ReadConfig.Endpoint, "testnet") {
message.URL += "testnet."
}
message.URL += "iotexscan.io/action/" + txhash
case "iotxplorer":
message.URL = "iotxplorer.io/actions/" + txhash
default:
message.URL = config.ReadConfig.Explorer + txhash
}
fmt.Println(message.String())
return nil
}
// PrivateKeyFromSigner returns private key from signer
func PrivateKeyFromSigner(signer string) (crypto.PrivateKey, error) {
var prvKey crypto.PrivateKey
var err error
if account.IsSignerExist(signer) || util.AliasIsHdwalletKey(signer) {
// Get signer's password
password := passwordFlag.Value().(string)
if password == "" {
output.PrintQuery(fmt.Sprintf("Enter password #%s:\n", signer))
password, err = util.ReadSecretFromStdin()
if err != nil {
return nil, output.NewError(output.InputError, "failed to get password", err)
}
}
if util.AliasIsHdwalletKey(signer) {
account, change, index, err := util.ParseHdwPath(signer)
if err != nil {
return nil, output.NewError(output.InputError, "invalid hdwallet key format", err)
}
_, prvKey, err = hdwallet.DeriveKey(account, change, index, password)
if err != nil {
return nil, output.NewError(output.InputError, "failed to derive key from HDWallet", err)
}
} else {
prvKey, err = account.LocalAccountToPrivateKey(signer, password)
if err != nil {
return nil, output.NewError(output.KeystoreError, "failed to get private key from keystore", err)
}
}
return prvKey, nil
}
// Get private key
output.PrintQuery(fmt.Sprintf("Enter private key #%s:", signer))
prvKeyString, err := util.ReadSecretFromStdin()
if err != nil {
return nil, output.NewError(output.InputError, "failed to get private key", err)
}
prvKey, err = crypto.HexStringToPrivateKey(prvKeyString)
if err != nil {
return nil, output.NewError(output.InputError, "failed to create private key from HexString input", err)
}
return prvKey, nil
}
// SendAction sends signed action to blockchain
func SendAction(elp action.Envelope, signer string) error {
prvKey, err := PrivateKeyFromSigner(signer)
if err != nil {
return err
}
if util.AliasIsHdwalletKey(signer) {
addr := prvKey.PublicKey().Address()
signer = addr.String()
nonce, err := nonce(signer)
if err != nil {
return output.NewError(0, "failed to get nonce ", err)
}
elp.SetNonce(nonce)
}
sealed, err := action.Sign(elp, prvKey)
prvKey.Zero()
if err != nil {
return output.NewError(output.CryptoError, "failed to sign action", err)
}
if err := isBalanceEnough(signer, sealed); err != nil {
return output.NewError(0, "failed to pass balance check", err) // TODO: undefined error
}
selp := sealed.Proto()
actionInfo, err := printActionProto(selp)
if err != nil {
return output.NewError(0, "failed to print action proto message", err)
}
if yesFlag.Value() == false {
var confirm string
info := fmt.Sprintln(actionInfo + "\nPlease confirm your action.\n")
message := output.ConfirmationMessage{Info: info, Options: []string{"yes"}}
fmt.Println(message.String())
fmt.Scanf("%s", &confirm)
if !strings.EqualFold(confirm, "yes") {
output.PrintResult("quit")
return nil
}
}
return SendRaw(selp)
}
// Execute sends signed execution transaction to blockchain
func Execute(contract string, amount *big.Int, bytecode []byte) error {
if len(contract) == 0 && len(bytecode) == 0 {
return output.NewError(output.InputError, "failed to deploy contract with empty bytecode", nil)
}
gasPriceRau, err := gasPriceInRau()
if err != nil {
return output.NewError(0, "failed to get gas price", err)
}
signer, err := Signer()
if err != nil {
return output.NewError(output.AddressError, "failed to get signer address", err)
}
nonce, err := nonce(signer)
if err != nil {
return output.NewError(0, "failed to get nonce", err)
}
gasLimit := gasLimitFlag.Value().(uint64)
tx, err := action.NewExecution(contract, nonce, amount, gasLimit, gasPriceRau, bytecode)
if err != nil || tx == nil {
return output.NewError(output.InstantiationError, "failed to make a Execution instance", err)
}
if gasLimit == 0 {
tx, err = fixGasLimit(signer, tx)
if err != nil || tx == nil {
return output.NewError(0, "failed to fix Execution gaslimit", err)
}
gasLimit = tx.GasLimit()
}
return SendAction(
(&action.EnvelopeBuilder{}).
SetNonce(nonce).
SetGasPrice(gasPriceRau).
SetGasLimit(gasLimit).
SetAction(tx).Build(),
signer,
)
}
// Read reads smart contract on IoTeX blockchain
func Read(contract address.Address, amount string, bytecode []byte) (string, error) {
conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure)
if err != nil {
return "", output.NewError(output.NetworkError, "failed to connect to endpoint", err)
}
defer conn.Close()
ctx := context.Background()
jwtMD, err := util.JwtAuth()
if err == nil {
ctx = metautils.NiceMD(jwtMD).ToOutgoing(ctx)
}
callerAddr, _ := Signer()
if callerAddr == "" {
callerAddr = address.ZeroAddress
}
res, err := iotexapi.NewAPIServiceClient(conn).ReadContract(
ctx,
&iotexapi.ReadContractRequest{
Execution: &iotextypes.Execution{
Amount: amount,
Contract: contract.String(),
Data: bytecode,
},
CallerAddress: callerAddr,
GasLimit: gasLimitFlag.Value().(uint64),
},
)
if err == nil {
return res.Data, nil
}
if sta, ok := status.FromError(err); ok {
return "", output.NewError(output.APIError, sta.Message(), nil)
}
return "", output.NewError(output.NetworkError, "failed to invoke ReadContract api", err)
}
func isBalanceEnough(address string, act action.SealedEnvelope) error {
accountMeta, err := account.GetAccountMeta(address)
if err != nil {
return output.NewError(0, "failed to get account meta", err)
}
balance, ok := big.NewInt(0).SetString(accountMeta.Balance, 10)
if !ok {
return output.NewError(output.ConvertError, "failed to convert balance into big int", nil)
}
cost, err := act.Cost()
if err != nil {
return output.NewError(output.RuntimeError, "failed to check cost of an action", nil)
}
if balance.Cmp(cost) < 0 {
return output.NewError(output.ValidationError, "balance is not enough", nil)
}
return nil
}
| 1 | 23,634 | no need to add this flag; query the endpoint set-up to determine the chainID | iotexproject-iotex-core | go |
@@ -0,0 +1,9 @@
+if (node.querySelector('input[type="submit"], img[type="submit"], button[type="submit"]')) {
+ return true;
+}
+
+if (!node.querySelectorAll(':not(textarea)').length) {
+ return false;
+}
+
+return undefined; | 1 | 1 | 13,237 | Here is a few scenario - What if there is a submit button with in a form, but is always disabled? | dequelabs-axe-core | js |
|
@@ -1498,6 +1498,7 @@ const instr_info_t * const op_instr[] =
#define Wed TYPE_W, OPSZ_16_vex32_evex64
#define Vex TYPE_V, OPSZ_16_vex32_evex64
#define Wex TYPE_W, OPSZ_16_vex32_evex64
+#define Weh_x TYPE_W, OPSZ_half_16_vex32_evex64
/* my own codes
* size m = 32 or 16 bit depending on addr size attribute | 1 | /* **********************************************************
* Copyright (c) 2011-2019 Google, Inc. All rights reserved.
* Copyright (c) 2001-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2001 Hewlett-Packard Company */
/* decode_table.c -- tables for decoding x86 instructions
*/
#include "../globals.h" /* need this to include decode.h (uint, etc.) */
#include "arch.h" /* need this to include decode.h (byte, etc. */
#include "instr.h" /* for REG_ constants */
#include "decode.h"
#include "decode_private.h"
/****************************************************************************
* All code below based on tables in the ``Intel Architecture Software
* Developer's Manual,'' Volume 2: Instruction Set Reference, 2001.
* Updated with information from later Intel manuals and AMD manuals.
*
* I added many new types not present in the Intel tables: see decode.h
*
* I don't list %eflags as a source or dest operand, but the particular
* flags written are encoded.
*
* XXX: some day it may be worth adding flags indicating which instrs
* are valid on which models of which processors (probably best to just add
* which cpuid flag must be set for the instr to be supported): for
* now though we do not rely on being able to predict which instrs are
* invalid.
*/
// We skip auto-formatting for the entire file to keep our aligned op_instr
// entries and our single-line table entries:
/* clang-format off */
/****************************************************************************
* Operand pointers into tables
* When there are multiple encodings of an opcode, this points to the first
* entry in a linked list.
* This array corresponds with the enum in opcode.h
* IF YOU CHANGE ONE YOU MUST CHANGE THE OTHER
*/
const instr_info_t * const op_instr[] =
{
/* OP_INVALID */ NULL,
/* OP_UNDECODED */ NULL,
/* OP_CONTD */ NULL,
/* OP_LABEL */ NULL,
/* OP_add */ &first_byte[0x05],
/* OP_or */ &first_byte[0x0d],
/* OP_adc */ &first_byte[0x15],
/* OP_sbb */ &first_byte[0x1d],
/* OP_and */ &first_byte[0x25],
/* OP_daa */ &first_byte[0x27],
/* OP_sub */ &first_byte[0x2d],
/* OP_das */ &first_byte[0x2f],
/* OP_xor */ &first_byte[0x35],
/* OP_aaa */ &first_byte[0x37],
/* OP_cmp */ &first_byte[0x3d],
/* OP_aas */ &first_byte[0x3f],
/* OP_inc */ &x64_extensions[0][0],
/* OP_dec */ &x64_extensions[8][0],
/* OP_push */ &first_byte[0x50],
/* OP_push_imm*/ &first_byte[0x68],
/* OP_pop */ &first_byte[0x58],
/* OP_pusha */ &first_byte[0x60],
/* OP_popa */ &first_byte[0x61],
/* OP_bound */ &evex_prefix_extensions[0][0],
/* OP_arpl */ &x64_extensions[16][0],
/* OP_imul */ &base_extensions[10][5],
/* OP_jo_short */ &first_byte[0x70],
/* OP_jno_short */ &first_byte[0x71],
/* OP_jb_short */ &first_byte[0x72],
/* OP_jnb_short */ &first_byte[0x73],
/* OP_jz_short */ &first_byte[0x74],
/* OP_jnz_short */ &first_byte[0x75],
/* OP_jbe_short */ &first_byte[0x76],
/* OP_jnbe_short */ &first_byte[0x77],
/* OP_js_short */ &first_byte[0x78],
/* OP_jns_short */ &first_byte[0x79],
/* OP_jp_short */ &first_byte[0x7a],
/* OP_jnp_short */ &first_byte[0x7b],
/* OP_jl_short */ &first_byte[0x7c],
/* OP_jnl_short */ &first_byte[0x7d],
/* OP_jle_short */ &first_byte[0x7e],
/* OP_jnle_short */ &first_byte[0x7f],
/* OP_call */ &first_byte[0xe8],
/* OP_call_ind */ &base_extensions[12][2],
/* OP_call_far */ &first_byte[0x9a],
/* OP_call_far_ind */ &base_extensions[12][3],
/* OP_jmp */ &first_byte[0xe9],
/* OP_jmp_short */ &first_byte[0xeb],
/* OP_jmp_ind */ &base_extensions[12][4],
/* OP_jmp_far */ &first_byte[0xea],
/* OP_jmp_far_ind */ &base_extensions[12][5],
/* OP_loopne */ &first_byte[0xe0],
/* OP_loope */ &first_byte[0xe1],
/* OP_loop */ &first_byte[0xe2],
/* OP_jecxz */ &first_byte[0xe3],
    /* point mov_ld & mov_st at the eAX & al forms: they save 1 byte (no modrm);
     * hopefully the time spent considering them first doesn't offset that savings */
/* OP_mov_ld */ &first_byte[0xa1],
/* OP_mov_st */ &first_byte[0xa3],
    /* PR 250397: store of immed is mov_st not mov_imm, even though it can be immed->reg,
     * which we address by sharing part of the mov_st template chain */
/* OP_mov_imm */ &first_byte[0xb8],
/* OP_mov_seg */ &first_byte[0x8e],
/* OP_mov_priv */ &second_byte[0x20],
/* OP_test */ &first_byte[0xa9],
/* OP_lea */ &first_byte[0x8d],
/* OP_xchg */ &first_byte[0x91],
/* OP_cwde */ &first_byte[0x98],
/* OP_cdq */ &first_byte[0x99],
/* OP_fwait */ &first_byte[0x9b],
/* OP_pushf */ &first_byte[0x9c],
/* OP_popf */ &first_byte[0x9d],
/* OP_sahf */ &first_byte[0x9e],
/* OP_lahf */ &first_byte[0x9f],
/* OP_ret */ &first_byte[0xc2],
/* OP_ret_far */ &first_byte[0xca],
/* OP_les */ &vex_prefix_extensions[0][0],
/* OP_lds */ &vex_prefix_extensions[1][0],
/* OP_enter */ &first_byte[0xc8],
/* OP_leave */ &first_byte[0xc9],
/* OP_int3 */ &first_byte[0xcc],
/* OP_int */ &first_byte[0xcd],
/* OP_into */ &first_byte[0xce],
/* OP_iret */ &first_byte[0xcf],
/* OP_aam */ &first_byte[0xd4],
/* OP_aad */ &first_byte[0xd5],
/* OP_xlat */ &first_byte[0xd7],
/* OP_in */ &first_byte[0xe5],
/* OP_out */ &first_byte[0xe7],
/* OP_hlt */ &first_byte[0xf4],
/* OP_cmc */ &first_byte[0xf5],
/* OP_clc */ &first_byte[0xf8],
/* OP_stc */ &first_byte[0xf9],
/* OP_cli */ &first_byte[0xfa],
/* OP_sti */ &first_byte[0xfb],
/* OP_cld */ &first_byte[0xfc],
/* OP_std */ &first_byte[0xfd],
/* OP_lar */ &second_byte[0x02],
/* OP_lsl */ &second_byte[0x03],
/* OP_syscall */ &second_byte[0x05],
/* OP_clts */ &second_byte[0x06],
/* OP_sysret */ &second_byte[0x07],
/* OP_invd */ &second_byte[0x08],
/* OP_wbinvd */ &second_byte[0x09],
/* OP_ud2a */ &second_byte[0x0b],
/* OP_nop_modrm */ &second_byte[0x1f],
/* OP_movntps */ &prefix_extensions[11][0],
/* OP_movntpd */ &prefix_extensions[11][2],
/* OP_wrmsr */ &second_byte[0x30],
/* OP_rdtsc */ &second_byte[0x31],
/* OP_rdmsr */ &second_byte[0x32],
/* OP_rdpmc */ &second_byte[0x33],
/* OP_sysenter */ &second_byte[0x34],
/* OP_sysexit */ &second_byte[0x35],
/* OP_cmovo */ &second_byte[0x40],
/* OP_cmovno */ &e_vex_extensions[83][0],
/* OP_cmovb */ &e_vex_extensions[84][0],
/* OP_cmovnb */ &second_byte[0x43],
/* OP_cmovz */ &e_vex_extensions[86][0],
/* OP_cmovnz */ &e_vex_extensions[87][0],
/* OP_cmovbe */ &e_vex_extensions[88][0],
/* OP_cmovnbe */ &e_vex_extensions[89][0],
/* OP_cmovs */ &second_byte[0x48],
/* OP_cmovns */ &second_byte[0x49],
/* OP_cmovp */ &e_vex_extensions[90][0],
/* OP_cmovnp */ &e_vex_extensions[85][0],
/* OP_cmovl */ &second_byte[0x4c],
/* OP_cmovnl */ &second_byte[0x4d],
/* OP_cmovle */ &second_byte[0x4e],
/* OP_cmovnle */ &second_byte[0x4f],
/* OP_punpcklbw */ &prefix_extensions[32][0],
/* OP_punpcklwd */ &prefix_extensions[33][0],
/* OP_punpckldq */ &prefix_extensions[34][0],
/* OP_packsswb */ &prefix_extensions[35][0],
/* OP_pcmpgtb */ &prefix_extensions[36][0],
/* OP_pcmpgtw */ &prefix_extensions[37][0],
/* OP_pcmpgtd */ &prefix_extensions[38][0],
/* OP_packuswb */ &prefix_extensions[39][0],
/* OP_punpckhbw */ &prefix_extensions[40][0],
/* OP_punpckhwd */ &prefix_extensions[41][0],
/* OP_punpckhdq */ &prefix_extensions[42][0],
/* OP_packssdw */ &prefix_extensions[43][0],
/* OP_punpcklqdq */ &prefix_extensions[44][2],
/* OP_punpckhqdq */ &prefix_extensions[45][2],
/* OP_movd */ &prefix_extensions[46][0],
/* OP_movq */ &prefix_extensions[112][0],
/* OP_movdqu */ &prefix_extensions[112][1],
/* OP_movdqa */ &prefix_extensions[112][2],
/* OP_pshufw */ &prefix_extensions[47][0],
/* OP_pshufd */ &prefix_extensions[47][2],
/* OP_pshufhw */ &prefix_extensions[47][1],
/* OP_pshuflw */ &prefix_extensions[47][3],
/* OP_pcmpeqb */ &prefix_extensions[48][0],
/* OP_pcmpeqw */ &prefix_extensions[49][0],
/* OP_pcmpeqd */ &prefix_extensions[50][0],
/* OP_emms */ &vex_L_extensions[0][0],
/* OP_jo */ &second_byte[0x80],
/* OP_jno */ &second_byte[0x81],
/* OP_jb */ &second_byte[0x82],
/* OP_jnb */ &second_byte[0x83],
/* OP_jz */ &second_byte[0x84],
/* OP_jnz */ &second_byte[0x85],
/* OP_jbe */ &second_byte[0x86],
/* OP_jnbe */ &second_byte[0x87],
/* OP_js */ &second_byte[0x88],
/* OP_jns */ &second_byte[0x89],
/* OP_jp */ &second_byte[0x8a],
/* OP_jnp */ &second_byte[0x8b],
/* OP_jl */ &second_byte[0x8c],
/* OP_jnl */ &second_byte[0x8d],
/* OP_jle */ &second_byte[0x8e],
/* OP_jnle */ &second_byte[0x8f],
/* OP_seto */ &e_vex_extensions[79][0],
/* OP_setno */ &e_vex_extensions[80][0],
/* OP_setb */ &e_vex_extensions[81][0],
/* OP_setnb */ &e_vex_extensions[82][0],
/* OP_setz */ &second_byte[0x94],
/* OP_setnz */ &second_byte[0x95],
/* OP_setbe */ &second_byte[0x96],
/* OP_setnbe */ &second_byte[0x97],
/* OP_sets */ &e_vex_extensions[91][0],
/* OP_setns */ &e_vex_extensions[92][0],
/* OP_setp */ &second_byte[0x9a],
/* OP_setnp */ &second_byte[0x9b],
/* OP_setl */ &second_byte[0x9c],
/* OP_setnl */ &second_byte[0x9d],
/* OP_setle */ &second_byte[0x9e],
/* OP_setnle */ &second_byte[0x9f],
/* OP_cpuid */ &second_byte[0xa2],
/* OP_bt */ &second_byte[0xa3],
/* OP_shld */ &second_byte[0xa4],
/* OP_rsm */ &second_byte[0xaa],
/* OP_bts */ &second_byte[0xab],
/* OP_shrd */ &second_byte[0xac],
/* OP_cmpxchg */ &second_byte[0xb1],
/* OP_lss */ &second_byte[0xb2],
/* OP_btr */ &second_byte[0xb3],
/* OP_lfs */ &second_byte[0xb4],
/* OP_lgs */ &second_byte[0xb5],
/* OP_movzx */ &second_byte[0xb7],
/* OP_ud2b */ &second_byte[0xb9],
/* OP_btc */ &second_byte[0xbb],
/* OP_bsf */ &prefix_extensions[140][0],
/* OP_bsr */ &prefix_extensions[136][0],
/* OP_movsx */ &second_byte[0xbf],
/* OP_xadd */ &second_byte[0xc1],
/* OP_movnti */ &second_byte[0xc3],
/* OP_pinsrw */ &prefix_extensions[53][0],
/* OP_pextrw */ &prefix_extensions[54][0],
/* OP_bswap */ &second_byte[0xc8],
/* OP_psrlw */ &prefix_extensions[56][0],
/* OP_psrld */ &prefix_extensions[57][0],
/* OP_psrlq */ &prefix_extensions[58][0],
/* OP_paddq */ &prefix_extensions[59][0],
/* OP_pmullw */ &prefix_extensions[60][0],
/* OP_pmovmskb */ &prefix_extensions[62][0],
/* OP_psubusb */ &prefix_extensions[63][0],
/* OP_psubusw */ &prefix_extensions[64][0],
/* OP_pminub */ &prefix_extensions[65][0],
/* OP_pand */ &prefix_extensions[66][0],
/* OP_paddusb */ &prefix_extensions[67][0],
/* OP_paddusw */ &prefix_extensions[68][0],
/* OP_pmaxub */ &prefix_extensions[69][0],
/* OP_pandn */ &prefix_extensions[70][0],
/* OP_pavgb */ &prefix_extensions[71][0],
/* OP_psraw */ &prefix_extensions[72][0],
/* OP_psrad */ &prefix_extensions[73][0],
/* OP_pavgw */ &prefix_extensions[74][0],
/* OP_pmulhuw */ &prefix_extensions[75][0],
/* OP_pmulhw */ &prefix_extensions[76][0],
/* OP_movntq */ &prefix_extensions[78][0],
/* OP_movntdq */ &prefix_extensions[78][2],
/* OP_psubsb */ &prefix_extensions[79][0],
/* OP_psubsw */ &prefix_extensions[80][0],
/* OP_pminsw */ &prefix_extensions[81][0],
/* OP_por */ &prefix_extensions[82][0],
/* OP_paddsb */ &prefix_extensions[83][0],
/* OP_paddsw */ &prefix_extensions[84][0],
/* OP_pmaxsw */ &prefix_extensions[85][0],
/* OP_pxor */ &prefix_extensions[86][0],
/* OP_psllw */ &prefix_extensions[87][0],
/* OP_pslld */ &prefix_extensions[88][0],
/* OP_psllq */ &prefix_extensions[89][0],
/* OP_pmuludq */ &prefix_extensions[90][0],
/* OP_pmaddwd */ &prefix_extensions[91][0],
/* OP_psadbw */ &prefix_extensions[92][0],
/* OP_maskmovq */ &prefix_extensions[93][0],
/* OP_maskmovdqu */ &prefix_extensions[93][2],
/* OP_psubb */ &prefix_extensions[94][0],
/* OP_psubw */ &prefix_extensions[95][0],
/* OP_psubd */ &prefix_extensions[96][0],
/* OP_psubq */ &prefix_extensions[97][0],
/* OP_paddb */ &prefix_extensions[98][0],
/* OP_paddw */ &prefix_extensions[99][0],
/* OP_paddd */ &prefix_extensions[100][0],
/* OP_psrldq */ &prefix_extensions[101][2],
/* OP_pslldq */ &prefix_extensions[102][2],
/* OP_rol */ &base_extensions[ 4][0],
/* OP_ror */ &base_extensions[ 4][1],
/* OP_rcl */ &base_extensions[ 4][2],
/* OP_rcr */ &base_extensions[ 4][3],
/* OP_shl */ &base_extensions[ 4][4],
/* OP_shr */ &base_extensions[ 4][5],
/* OP_sar */ &base_extensions[ 4][7],
/* OP_not */ &base_extensions[10][2],
/* OP_neg */ &base_extensions[10][3],
/* OP_mul */ &base_extensions[10][4],
/* OP_div */ &base_extensions[10][6],
/* OP_idiv */ &base_extensions[10][7],
/* OP_sldt */ &base_extensions[13][0],
/* OP_str */ &base_extensions[13][1],
/* OP_lldt */ &base_extensions[13][2],
/* OP_ltr */ &base_extensions[13][3],
/* OP_verr */ &base_extensions[13][4],
/* OP_verw */ &base_extensions[13][5],
/* OP_sgdt */ &mod_extensions[0][0],
/* OP_sidt */ &mod_extensions[1][0],
/* OP_lgdt */ &mod_extensions[5][0],
/* OP_lidt */ &mod_extensions[4][0],
/* OP_smsw */ &base_extensions[14][4],
/* OP_lmsw */ &base_extensions[14][6],
/* OP_invlpg */ &mod_extensions[2][0],
/* OP_cmpxchg8b */ &base_extensions[16][1],
/* OP_fxsave32 */ &rex_w_extensions[0][0],
/* OP_fxrstor32 */ &rex_w_extensions[1][0],
/* OP_ldmxcsr */ &e_vex_extensions[61][0],
/* OP_stmxcsr */ &e_vex_extensions[62][0],
/* OP_lfence */ &mod_extensions[6][1],
/* OP_mfence */ &mod_extensions[7][1],
/* OP_clflush */ &mod_extensions[3][0],
/* OP_sfence */ &mod_extensions[3][1],
/* OP_prefetchnta */ &base_extensions[23][0],
/* OP_prefetcht0 */ &base_extensions[23][1],
/* OP_prefetcht1 */ &base_extensions[23][2],
/* OP_prefetcht2 */ &base_extensions[23][3],
/* OP_prefetch */ &base_extensions[24][0],
/* OP_prefetchw */ &base_extensions[24][1],
/* OP_movups */ &prefix_extensions[ 0][0],
/* OP_movss */ &mod_extensions[18][0],
/* OP_movupd */ &prefix_extensions[ 0][2],
/* OP_movsd */ &mod_extensions[19][0],
/* OP_movlps */ &prefix_extensions[ 2][0],
/* OP_movlpd */ &prefix_extensions[ 2][2],
/* OP_unpcklps */ &prefix_extensions[ 4][0],
/* OP_unpcklpd */ &prefix_extensions[ 4][2],
/* OP_unpckhps */ &prefix_extensions[ 5][0],
/* OP_unpckhpd */ &prefix_extensions[ 5][2],
/* OP_movhps */ &prefix_extensions[ 6][0],
/* OP_movhpd */ &prefix_extensions[ 6][2],
/* OP_movaps */ &prefix_extensions[ 8][0],
/* OP_movapd */ &prefix_extensions[ 8][2],
/* OP_cvtpi2ps */ &prefix_extensions[10][0],
/* OP_cvtsi2ss */ &prefix_extensions[10][1],
/* OP_cvtpi2pd */ &prefix_extensions[10][2],
/* OP_cvtsi2sd */ &prefix_extensions[10][3],
/* OP_cvttps2pi */ &prefix_extensions[12][0],
/* OP_cvttss2si */ &prefix_extensions[12][1],
/* OP_cvttpd2pi */ &prefix_extensions[12][2],
/* OP_cvttsd2si */ &prefix_extensions[12][3],
/* OP_cvtps2pi */ &prefix_extensions[13][0],
/* OP_cvtss2si */ &prefix_extensions[13][1],
/* OP_cvtpd2pi */ &prefix_extensions[13][2],
/* OP_cvtsd2si */ &prefix_extensions[13][3],
/* OP_ucomiss */ &prefix_extensions[14][0],
/* OP_ucomisd */ &prefix_extensions[14][2],
/* OP_comiss */ &prefix_extensions[15][0],
/* OP_comisd */ &prefix_extensions[15][2],
/* OP_movmskps */ &prefix_extensions[16][0],
/* OP_movmskpd */ &prefix_extensions[16][2],
/* OP_sqrtps */ &prefix_extensions[17][0],
/* OP_sqrtss */ &prefix_extensions[17][1],
/* OP_sqrtpd */ &prefix_extensions[17][2],
/* OP_sqrtsd */ &prefix_extensions[17][3],
/* OP_rsqrtps */ &prefix_extensions[18][0],
/* OP_rsqrtss */ &prefix_extensions[18][1],
/* OP_rcpps */ &prefix_extensions[19][0],
/* OP_rcpss */ &prefix_extensions[19][1],
/* OP_andps */ &prefix_extensions[20][0],
/* OP_andpd */ &prefix_extensions[20][2],
/* OP_andnps */ &prefix_extensions[21][0],
/* OP_andnpd */ &prefix_extensions[21][2],
/* OP_orps */ &prefix_extensions[22][0],
/* OP_orpd */ &prefix_extensions[22][2],
/* OP_xorps */ &prefix_extensions[23][0],
/* OP_xorpd */ &prefix_extensions[23][2],
/* OP_addps */ &prefix_extensions[24][0],
/* OP_addss */ &prefix_extensions[24][1],
/* OP_addpd */ &prefix_extensions[24][2],
/* OP_addsd */ &prefix_extensions[24][3],
/* OP_mulps */ &prefix_extensions[25][0],
/* OP_mulss */ &prefix_extensions[25][1],
/* OP_mulpd */ &prefix_extensions[25][2],
/* OP_mulsd */ &prefix_extensions[25][3],
/* OP_cvtps2pd */ &prefix_extensions[26][0],
/* OP_cvtss2sd */ &prefix_extensions[26][1],
/* OP_cvtpd2ps */ &prefix_extensions[26][2],
/* OP_cvtsd2ss */ &prefix_extensions[26][3],
/* OP_cvtdq2ps */ &prefix_extensions[27][0],
/* OP_cvttps2dq */ &prefix_extensions[27][1],
/* OP_cvtps2dq */ &prefix_extensions[27][2],
/* OP_subps */ &prefix_extensions[28][0],
/* OP_subss */ &prefix_extensions[28][1],
/* OP_subpd */ &prefix_extensions[28][2],
/* OP_subsd */ &prefix_extensions[28][3],
/* OP_minps */ &prefix_extensions[29][0],
/* OP_minss */ &prefix_extensions[29][1],
/* OP_minpd */ &prefix_extensions[29][2],
/* OP_minsd */ &prefix_extensions[29][3],
/* OP_divps */ &prefix_extensions[30][0],
/* OP_divss */ &prefix_extensions[30][1],
/* OP_divpd */ &prefix_extensions[30][2],
/* OP_divsd */ &prefix_extensions[30][3],
/* OP_maxps */ &prefix_extensions[31][0],
/* OP_maxss */ &prefix_extensions[31][1],
/* OP_maxpd */ &prefix_extensions[31][2],
/* OP_maxsd */ &prefix_extensions[31][3],
/* OP_cmpps */ &prefix_extensions[52][0],
/* OP_cmpss */ &prefix_extensions[52][1],
/* OP_cmppd */ &prefix_extensions[52][2],
/* OP_cmpsd */ &prefix_extensions[52][3],
/* OP_shufps */ &prefix_extensions[55][0],
/* OP_shufpd */ &prefix_extensions[55][2],
/* OP_cvtdq2pd */ &prefix_extensions[77][1],
/* OP_cvttpd2dq */ &prefix_extensions[77][2],
/* OP_cvtpd2dq */ &prefix_extensions[77][3],
/* OP_nop */ &rex_b_extensions[0][0],
/* OP_pause */ &prefix_extensions[103][1],
/* OP_ins */ &rep_extensions[1][0],
/* OP_rep_ins */ &rep_extensions[1][2],
/* OP_outs */ &rep_extensions[3][0],
/* OP_rep_outs */ &rep_extensions[3][2],
/* OP_movs */ &rep_extensions[5][0],
/* OP_rep_movs */ &rep_extensions[5][2],
/* OP_stos */ &rep_extensions[7][0],
/* OP_rep_stos */ &rep_extensions[7][2],
/* OP_lods */ &rep_extensions[9][0],
/* OP_rep_lods */ &rep_extensions[9][2],
/* OP_cmps */ &repne_extensions[1][0],
/* OP_rep_cmps */ &repne_extensions[1][2],
/* OP_repne_cmps */ &repne_extensions[1][4],
/* OP_scas */ &repne_extensions[3][0],
/* OP_rep_scas */ &repne_extensions[3][2],
/* OP_repne_scas */ &repne_extensions[3][4],
/* OP_fadd */ &float_low_modrm[0x00],
/* OP_fmul */ &float_low_modrm[0x01],
/* OP_fcom */ &float_low_modrm[0x02],
/* OP_fcomp */ &float_low_modrm[0x03],
/* OP_fsub */ &float_low_modrm[0x04],
/* OP_fsubr */ &float_low_modrm[0x05],
/* OP_fdiv */ &float_low_modrm[0x06],
/* OP_fdivr */ &float_low_modrm[0x07],
/* OP_fld */ &float_low_modrm[0x08],
/* OP_fst */ &float_low_modrm[0x0a],
/* OP_fstp */ &float_low_modrm[0x0b],
/* OP_fldenv */ &float_low_modrm[0x0c],
/* OP_fldcw */ &float_low_modrm[0x0d],
/* OP_fnstenv */ &float_low_modrm[0x0e],
/* OP_fnstcw */ &float_low_modrm[0x0f],
/* OP_fiadd */ &float_low_modrm[0x10],
/* OP_fimul */ &float_low_modrm[0x11],
/* OP_ficom */ &float_low_modrm[0x12],
/* OP_ficomp */ &float_low_modrm[0x13],
/* OP_fisub */ &float_low_modrm[0x14],
/* OP_fisubr */ &float_low_modrm[0x15],
/* OP_fidiv */ &float_low_modrm[0x16],
/* OP_fidivr */ &float_low_modrm[0x17],
/* OP_fild */ &float_low_modrm[0x18],
/* OP_fist */ &float_low_modrm[0x1a],
/* OP_fistp */ &float_low_modrm[0x1b],
/* OP_frstor */ &float_low_modrm[0x2c],
/* OP_fnsave */ &float_low_modrm[0x2e],
/* OP_fnstsw */ &float_low_modrm[0x2f],
/* OP_fbld */ &float_low_modrm[0x3c],
/* OP_fbstp */ &float_low_modrm[0x3e],
/* OP_fxch */ &float_high_modrm[1][0x08],
/* OP_fnop */ &float_high_modrm[1][0x10],
/* OP_fchs */ &float_high_modrm[1][0x20],
/* OP_fabs */ &float_high_modrm[1][0x21],
/* OP_ftst */ &float_high_modrm[1][0x24],
/* OP_fxam */ &float_high_modrm[1][0x25],
/* OP_fld1 */ &float_high_modrm[1][0x28],
/* OP_fldl2t */ &float_high_modrm[1][0x29],
/* OP_fldl2e */ &float_high_modrm[1][0x2a],
/* OP_fldpi */ &float_high_modrm[1][0x2b],
/* OP_fldlg2 */ &float_high_modrm[1][0x2c],
/* OP_fldln2 */ &float_high_modrm[1][0x2d],
/* OP_fldz */ &float_high_modrm[1][0x2e],
/* OP_f2xm1 */ &float_high_modrm[1][0x30],
/* OP_fyl2x */ &float_high_modrm[1][0x31],
/* OP_fptan */ &float_high_modrm[1][0x32],
/* OP_fpatan */ &float_high_modrm[1][0x33],
/* OP_fxtract */ &float_high_modrm[1][0x34],
/* OP_fprem1 */ &float_high_modrm[1][0x35],
/* OP_fdecstp */ &float_high_modrm[1][0x36],
/* OP_fincstp */ &float_high_modrm[1][0x37],
/* OP_fprem */ &float_high_modrm[1][0x38],
/* OP_fyl2xp1 */ &float_high_modrm[1][0x39],
/* OP_fsqrt */ &float_high_modrm[1][0x3a],
/* OP_fsincos */ &float_high_modrm[1][0x3b],
/* OP_frndint */ &float_high_modrm[1][0x3c],
/* OP_fscale */ &float_high_modrm[1][0x3d],
/* OP_fsin */ &float_high_modrm[1][0x3e],
/* OP_fcos */ &float_high_modrm[1][0x3f],
/* OP_fcmovb */ &float_high_modrm[2][0x00],
/* OP_fcmove */ &float_high_modrm[2][0x08],
/* OP_fcmovbe */ &float_high_modrm[2][0x10],
/* OP_fcmovu */ &float_high_modrm[2][0x18],
/* OP_fucompp */ &float_high_modrm[2][0x29],
/* OP_fcmovnb */ &float_high_modrm[3][0x00],
/* OP_fcmovne */ &float_high_modrm[3][0x08],
/* OP_fcmovnbe */ &float_high_modrm[3][0x10],
/* OP_fcmovnu */ &float_high_modrm[3][0x18],
/* OP_fnclex */ &float_high_modrm[3][0x22],
/* OP_fninit */ &float_high_modrm[3][0x23],
/* OP_fucomi */ &float_high_modrm[3][0x28],
/* OP_fcomi */ &float_high_modrm[3][0x30],
/* OP_ffree */ &float_high_modrm[5][0x00],
/* OP_fucom */ &float_high_modrm[5][0x20],
/* OP_fucomp */ &float_high_modrm[5][0x28],
/* OP_faddp */ &float_high_modrm[6][0x00],
/* OP_fmulp */ &float_high_modrm[6][0x08],
/* OP_fcompp */ &float_high_modrm[6][0x19],
/* OP_fsubrp */ &float_high_modrm[6][0x20],
/* OP_fsubp */ &float_high_modrm[6][0x28],
/* OP_fdivrp */ &float_high_modrm[6][0x30],
/* OP_fdivp */ &float_high_modrm[6][0x38],
/* OP_fucomip */ &float_high_modrm[7][0x28],
/* OP_fcomip */ &float_high_modrm[7][0x30],
/* SSE3 instructions */
/* OP_fisttp */ &float_low_modrm[0x29],
/* OP_haddpd */ &prefix_extensions[114][2],
/* OP_haddps */ &prefix_extensions[114][3],
/* OP_hsubpd */ &prefix_extensions[115][2],
/* OP_hsubps */ &prefix_extensions[115][3],
/* OP_addsubpd */ &prefix_extensions[116][2],
/* OP_addsubps */ &prefix_extensions[116][3],
/* OP_lddqu */ &prefix_extensions[117][3],
/* OP_monitor */ &rm_extensions[1][0],
/* OP_mwait */ &rm_extensions[1][1],
/* OP_movsldup */ &prefix_extensions[ 2][1],
/* OP_movshdup */ &prefix_extensions[ 6][1],
/* OP_movddup */ &prefix_extensions[ 2][3],
/* 3D-Now! instructions */
/* OP_femms */ &second_byte[0x0e],
/* OP_unknown_3dnow */ &suffix_extensions[0],
/* OP_pavgusb */ &suffix_extensions[1],
/* OP_pfadd */ &suffix_extensions[2],
/* OP_pfacc */ &suffix_extensions[3],
/* OP_pfcmpge */ &suffix_extensions[4],
/* OP_pfcmpgt */ &suffix_extensions[5],
/* OP_pfcmpeq */ &suffix_extensions[6],
/* OP_pfmin */ &suffix_extensions[7],
/* OP_pfmax */ &suffix_extensions[8],
/* OP_pfmul */ &suffix_extensions[9],
/* OP_pfrcp */ &suffix_extensions[10],
/* OP_pfrcpit1 */ &suffix_extensions[11],
/* OP_pfrcpit2 */ &suffix_extensions[12],
/* OP_pfrsqrt */ &suffix_extensions[13],
/* OP_pfrsqit1 */ &suffix_extensions[14],
/* OP_pmulhrw */ &suffix_extensions[15],
/* OP_pfsub */ &suffix_extensions[16],
/* OP_pfsubr */ &suffix_extensions[17],
/* OP_pi2fd */ &suffix_extensions[18],
/* OP_pf2id */ &suffix_extensions[19],
/* OP_pi2fw */ &suffix_extensions[20],
/* OP_pf2iw */ &suffix_extensions[21],
/* OP_pfnacc */ &suffix_extensions[22],
/* OP_pfpnacc */ &suffix_extensions[23],
/* OP_pswapd */ &suffix_extensions[24],
/* SSSE3 */
/* OP_pshufb */ &prefix_extensions[118][0],
/* OP_phaddw */ &prefix_extensions[119][0],
/* OP_phaddd */ &prefix_extensions[120][0],
/* OP_phaddsw */ &prefix_extensions[121][0],
/* OP_pmaddubsw */ &prefix_extensions[122][0],
/* OP_phsubw */ &prefix_extensions[123][0],
/* OP_phsubd */ &prefix_extensions[124][0],
/* OP_phsubsw */ &prefix_extensions[125][0],
/* OP_psignb */ &prefix_extensions[126][0],
/* OP_psignw */ &prefix_extensions[127][0],
/* OP_psignd */ &prefix_extensions[128][0],
/* OP_pmulhrsw */ &prefix_extensions[129][0],
/* OP_pabsb */ &prefix_extensions[130][0],
/* OP_pabsw */ &prefix_extensions[131][0],
/* OP_pabsd */ &prefix_extensions[132][0],
/* OP_palignr */ &prefix_extensions[133][0],
    /* SSE4 (incl. AMD (SSE4A) and Intel-specific (SSE4.1, SSE4.2) extensions) */
/* OP_popcnt */ &second_byte[0xb8],
/* OP_movntss */ &prefix_extensions[11][1],
/* OP_movntsd */ &prefix_extensions[11][3],
/* OP_extrq */ &prefix_extensions[134][2],
/* OP_insertq */ &prefix_extensions[134][3],
/* OP_lzcnt */ &prefix_extensions[136][1],
/* OP_pblendvb */ &third_byte_38[16],
/* OP_blendvps */ &third_byte_38[17],
/* OP_blendvpd */ &third_byte_38[18],
/* OP_ptest */ &e_vex_extensions[3][0],
/* OP_pmovsxbw */ &e_vex_extensions[4][0],
/* OP_pmovsxbd */ &e_vex_extensions[5][0],
/* OP_pmovsxbq */ &e_vex_extensions[6][0],
/* OP_pmovsxwd */ &e_vex_extensions[7][0],
/* OP_pmovsxwq */ &e_vex_extensions[8][0],
/* OP_pmovsxdq */ &e_vex_extensions[9][0],
/* OP_pmuldq */ &e_vex_extensions[10][0],
/* OP_pcmpeqq */ &e_vex_extensions[11][0],
/* OP_movntdqa */ &e_vex_extensions[12][0],
/* OP_packusdw */ &e_vex_extensions[13][0],
/* OP_pmovzxbw */ &e_vex_extensions[14][0],
/* OP_pmovzxbd */ &e_vex_extensions[15][0],
/* OP_pmovzxbq */ &e_vex_extensions[16][0],
/* OP_pmovzxwd */ &e_vex_extensions[17][0],
/* OP_pmovzxwq */ &e_vex_extensions[18][0],
/* OP_pmovzxdq */ &e_vex_extensions[19][0],
/* OP_pcmpgtq */ &e_vex_extensions[20][0],
/* OP_pminsb */ &e_vex_extensions[21][0],
/* OP_pminsd */ &e_vex_extensions[22][0],
/* OP_pminuw */ &e_vex_extensions[23][0],
/* OP_pminud */ &e_vex_extensions[24][0],
/* OP_pmaxsb */ &e_vex_extensions[25][0],
/* OP_pmaxsd */ &e_vex_extensions[26][0],
/* OP_pmaxuw */ &e_vex_extensions[27][0],
/* OP_pmaxud */ &e_vex_extensions[28][0],
/* OP_pmulld */ &e_vex_extensions[29][0],
/* OP_phminposuw */ &e_vex_extensions[30][0],
/* OP_crc32 */ &prefix_extensions[139][3],
/* OP_pextrb */ &e_vex_extensions[36][0],
/* OP_pextrd */ &e_vex_extensions[38][0],
/* OP_extractps */ &e_vex_extensions[39][0],
/* OP_roundps */ &e_vex_extensions[40][0],
/* OP_roundpd */ &e_vex_extensions[41][0],
/* OP_roundss */ &e_vex_extensions[42][0],
/* OP_roundsd */ &e_vex_extensions[43][0],
/* OP_blendps */ &e_vex_extensions[44][0],
/* OP_blendpd */ &e_vex_extensions[45][0],
/* OP_pblendw */ &e_vex_extensions[46][0],
/* OP_pinsrb */ &e_vex_extensions[47][0],
/* OP_insertps */ &e_vex_extensions[48][0],
/* OP_pinsrd */ &e_vex_extensions[49][0],
/* OP_dpps */ &e_vex_extensions[50][0],
/* OP_dppd */ &e_vex_extensions[51][0],
/* OP_mpsadbw */ &e_vex_extensions[52][0],
/* OP_pcmpestrm */ &e_vex_extensions[53][0],
/* OP_pcmpestri */ &e_vex_extensions[54][0],
/* OP_pcmpistrm */ &e_vex_extensions[55][0],
/* OP_pcmpistri */ &e_vex_extensions[56][0],
/* x64 */
/* OP_movsxd */ &x64_extensions[16][1],
/* OP_swapgs */ &rm_extensions[2][0],
/* VMX */
/* OP_vmcall */ &rm_extensions[0][1],
/* OP_vmlaunch */ &rm_extensions[0][2],
/* OP_vmresume */ &rm_extensions[0][3],
/* OP_vmxoff */ &rm_extensions[0][4],
/* OP_vmptrst */ &mod_extensions[13][0],
/* OP_vmptrld */ &prefix_extensions[137][0],
/* OP_vmxon */ &prefix_extensions[137][1],
/* OP_vmclear */ &prefix_extensions[137][2],
/* OP_vmread */ &prefix_extensions[134][0],
/* OP_vmwrite */ &prefix_extensions[135][0],
/* undocumented */
/* OP_int1 */ &first_byte[0xf1],
/* OP_salc */ &first_byte[0xd6],
/* OP_ffreep */ &float_high_modrm[7][0x00],
/* AMD SVM */
/* OP_vmrun */ &rm_extensions[3][0],
/* OP_vmmcall */ &rm_extensions[3][1],
/* OP_vmload */ &rm_extensions[3][2],
/* OP_vmsave */ &rm_extensions[3][3],
/* OP_stgi */ &rm_extensions[3][4],
/* OP_clgi */ &rm_extensions[3][5],
/* OP_skinit */ &rm_extensions[3][6],
/* OP_invlpga */ &rm_extensions[3][7],
/* AMD though not part of SVM */
/* OP_rdtscp */ &rm_extensions[2][1],
/* Intel VMX additions */
/* OP_invept */ &third_byte_38[49],
/* OP_invvpid */ &third_byte_38[50],
/* added in Intel Westmere */
/* OP_pclmulqdq */ &e_vex_extensions[57][0],
/* OP_aesimc */ &e_vex_extensions[31][0],
/* OP_aesenc */ &e_vex_extensions[32][0],
/* OP_aesenclast */ &e_vex_extensions[33][0],
/* OP_aesdec */ &e_vex_extensions[34][0],
/* OP_aesdeclast */ &e_vex_extensions[35][0],
/* OP_aeskeygenassist*/ &e_vex_extensions[58][0],
/* added in Intel Atom */
/* OP_movbe */ &prefix_extensions[138][0],
/* added in Intel Sandy Bridge */
/* OP_xgetbv */ &rm_extensions[4][0],
/* OP_xsetbv */ &rm_extensions[4][1],
/* OP_xsave32 */ &rex_w_extensions[2][0],
/* OP_xrstor32 */ &rex_w_extensions[3][0],
/* OP_xsaveopt32 */ &rex_w_extensions[4][0],
/* AVX */
/* OP_vmovss */ &mod_extensions[ 8][0],
/* OP_vmovsd */ &mod_extensions[ 9][0],
/* OP_vmovups */ &prefix_extensions[ 0][4],
/* OP_vmovupd */ &prefix_extensions[ 0][6],
/* OP_vmovlps */ &prefix_extensions[ 2][4],
/* OP_vmovsldup */ &prefix_extensions[ 2][5],
/* OP_vmovlpd */ &prefix_extensions[ 2][6],
/* OP_vmovddup */ &prefix_extensions[ 2][7],
/* OP_vunpcklps */ &prefix_extensions[ 4][4],
/* OP_vunpcklpd */ &prefix_extensions[ 4][6],
/* OP_vunpckhps */ &prefix_extensions[ 5][4],
/* OP_vunpckhpd */ &prefix_extensions[ 5][6],
/* OP_vmovhps */ &prefix_extensions[ 6][4],
/* OP_vmovshdup */ &prefix_extensions[ 6][5],
/* OP_vmovhpd */ &prefix_extensions[ 6][6],
/* OP_vmovaps */ &prefix_extensions[ 8][4],
/* OP_vmovapd */ &prefix_extensions[ 8][6],
/* OP_vcvtsi2ss */ &prefix_extensions[10][5],
/* OP_vcvtsi2sd */ &prefix_extensions[10][7],
/* OP_vmovntps */ &prefix_extensions[11][4],
/* OP_vmovntpd */ &prefix_extensions[11][6],
/* OP_vcvttss2si */ &prefix_extensions[12][5],
/* OP_vcvttsd2si */ &prefix_extensions[12][7],
/* OP_vcvtss2si */ &prefix_extensions[13][5],
/* OP_vcvtsd2si */ &prefix_extensions[13][7],
/* OP_vucomiss */ &prefix_extensions[14][4],
/* OP_vucomisd */ &prefix_extensions[14][6],
/* OP_vcomiss */ &prefix_extensions[15][4],
/* OP_vcomisd */ &prefix_extensions[15][6],
/* OP_vmovmskps */ &prefix_extensions[16][4],
/* OP_vmovmskpd */ &prefix_extensions[16][6],
/* OP_vsqrtps */ &prefix_extensions[17][4],
/* OP_vsqrtss */ &prefix_extensions[17][5],
/* OP_vsqrtpd */ &prefix_extensions[17][6],
/* OP_vsqrtsd */ &prefix_extensions[17][7],
/* OP_vrsqrtps */ &prefix_extensions[18][4],
/* OP_vrsqrtss */ &prefix_extensions[18][5],
/* OP_vrcpps */ &prefix_extensions[19][4],
/* OP_vrcpss */ &prefix_extensions[19][5],
/* OP_vandps */ &prefix_extensions[20][4],
/* OP_vandpd */ &prefix_extensions[20][6],
/* OP_vandnps */ &prefix_extensions[21][4],
/* OP_vandnpd */ &prefix_extensions[21][6],
/* OP_vorps */ &prefix_extensions[22][4],
/* OP_vorpd */ &prefix_extensions[22][6],
/* OP_vxorps */ &prefix_extensions[23][4],
/* OP_vxorpd */ &prefix_extensions[23][6],
/* OP_vaddps */ &prefix_extensions[24][4],
/* OP_vaddss */ &prefix_extensions[24][5],
/* OP_vaddpd */ &prefix_extensions[24][6],
/* OP_vaddsd */ &prefix_extensions[24][7],
/* OP_vmulps */ &prefix_extensions[25][4],
/* OP_vmulss */ &prefix_extensions[25][5],
/* OP_vmulpd */ &prefix_extensions[25][6],
/* OP_vmulsd */ &prefix_extensions[25][7],
/* OP_vcvtps2pd */ &prefix_extensions[26][4],
/* OP_vcvtss2sd */ &prefix_extensions[26][5],
/* OP_vcvtpd2ps */ &prefix_extensions[26][6],
/* OP_vcvtsd2ss */ &prefix_extensions[26][7],
/* OP_vcvtdq2ps */ &prefix_extensions[27][4],
/* OP_vcvttps2dq */ &prefix_extensions[27][5],
/* OP_vcvtps2dq */ &prefix_extensions[27][6],
/* OP_vsubps */ &prefix_extensions[28][4],
/* OP_vsubss */ &prefix_extensions[28][5],
/* OP_vsubpd */ &prefix_extensions[28][6],
/* OP_vsubsd */ &prefix_extensions[28][7],
/* OP_vminps */ &prefix_extensions[29][4],
/* OP_vminss */ &prefix_extensions[29][5],
/* OP_vminpd */ &prefix_extensions[29][6],
/* OP_vminsd */ &prefix_extensions[29][7],
/* OP_vdivps */ &prefix_extensions[30][4],
/* OP_vdivss */ &prefix_extensions[30][5],
/* OP_vdivpd */ &prefix_extensions[30][6],
/* OP_vdivsd */ &prefix_extensions[30][7],
/* OP_vmaxps */ &prefix_extensions[31][4],
/* OP_vmaxss */ &prefix_extensions[31][5],
/* OP_vmaxpd */ &prefix_extensions[31][6],
/* OP_vmaxsd */ &prefix_extensions[31][7],
/* OP_vpunpcklbw */ &prefix_extensions[32][6],
/* OP_vpunpcklwd */ &prefix_extensions[33][6],
/* OP_vpunpckldq */ &prefix_extensions[34][6],
/* OP_vpacksswb */ &prefix_extensions[35][6],
/* OP_vpcmpgtb */ &prefix_extensions[36][6],
/* OP_vpcmpgtw */ &prefix_extensions[37][6],
/* OP_vpcmpgtd */ &prefix_extensions[38][6],
/* OP_vpackuswb */ &prefix_extensions[39][6],
/* OP_vpunpckhbw */ &prefix_extensions[40][6],
/* OP_vpunpckhwd */ &prefix_extensions[41][6],
/* OP_vpunpckhdq */ &prefix_extensions[42][6],
/* OP_vpackssdw */ &prefix_extensions[43][6],
/* OP_vpunpcklqdq */ &prefix_extensions[44][6],
/* OP_vpunpckhqdq */ &prefix_extensions[45][6],
/* OP_vmovd */ &prefix_extensions[46][6],
/* OP_vpshufhw */ &prefix_extensions[47][5],
/* OP_vpshufd */ &prefix_extensions[47][6],
/* OP_vpshuflw */ &prefix_extensions[47][7],
/* OP_vpcmpeqb */ &prefix_extensions[48][6],
/* OP_vpcmpeqw */ &prefix_extensions[49][6],
/* OP_vpcmpeqd */ &prefix_extensions[50][6],
/* OP_vmovq */ &prefix_extensions[51][5],
/* OP_vcmpps */ &prefix_extensions[52][4],
/* OP_vcmpss */ &prefix_extensions[52][5],
/* OP_vcmppd */ &prefix_extensions[52][6],
/* OP_vcmpsd */ &prefix_extensions[52][7],
/* OP_vpinsrw */ &prefix_extensions[53][6],
/* OP_vpextrw */ &prefix_extensions[54][6],
/* OP_vshufps */ &prefix_extensions[55][4],
/* OP_vshufpd */ &prefix_extensions[55][6],
/* OP_vpsrlw */ &prefix_extensions[56][6],
/* OP_vpsrld */ &prefix_extensions[57][6],
/* OP_vpsrlq */ &prefix_extensions[58][6],
/* OP_vpaddq */ &prefix_extensions[59][6],
/* OP_vpmullw */ &prefix_extensions[60][6],
/* OP_vpmovmskb */ &prefix_extensions[62][6],
/* OP_vpsubusb */ &prefix_extensions[63][6],
/* OP_vpsubusw */ &prefix_extensions[64][6],
/* OP_vpminub */ &prefix_extensions[65][6],
/* OP_vpand */ &prefix_extensions[66][6],
/* OP_vpaddusb */ &prefix_extensions[67][6],
/* OP_vpaddusw */ &prefix_extensions[68][6],
/* OP_vpmaxub */ &prefix_extensions[69][6],
/* OP_vpandn */ &prefix_extensions[70][6],
/* OP_vpavgb */ &prefix_extensions[71][6],
/* OP_vpsraw */ &prefix_extensions[72][6],
/* OP_vpsrad */ &prefix_extensions[73][6],
/* OP_vpavgw */ &prefix_extensions[74][6],
/* OP_vpmulhuw */ &prefix_extensions[75][6],
/* OP_vpmulhw */ &prefix_extensions[76][6],
/* OP_vcvtdq2pd */ &prefix_extensions[77][5],
/* OP_vcvttpd2dq */ &prefix_extensions[77][6],
/* OP_vcvtpd2dq */ &prefix_extensions[77][7],
/* OP_vmovntdq */ &prefix_extensions[78][6],
/* OP_vpsubsb */ &prefix_extensions[79][6],
/* OP_vpsubsw */ &prefix_extensions[80][6],
/* OP_vpminsw */ &prefix_extensions[81][6],
/* OP_vpor */ &prefix_extensions[82][6],
/* OP_vpaddsb */ &prefix_extensions[83][6],
/* OP_vpaddsw */ &prefix_extensions[84][6],
/* OP_vpmaxsw */ &prefix_extensions[85][6],
/* OP_vpxor */ &prefix_extensions[86][6],
/* OP_vpsllw */ &prefix_extensions[87][6],
/* OP_vpslld */ &prefix_extensions[88][6],
/* OP_vpsllq */ &prefix_extensions[89][6],
/* OP_vpmuludq */ &prefix_extensions[90][6],
/* OP_vpmaddwd */ &prefix_extensions[91][6],
/* OP_vpsadbw */ &prefix_extensions[92][6],
/* OP_vmaskmovdqu */ &prefix_extensions[93][6],
/* OP_vpsubb */ &prefix_extensions[94][6],
/* OP_vpsubw */ &prefix_extensions[95][6],
/* OP_vpsubd */ &prefix_extensions[96][6],
/* OP_vpsubq */ &prefix_extensions[97][6],
/* OP_vpaddb */ &prefix_extensions[98][6],
/* OP_vpaddw */ &prefix_extensions[99][6],
/* OP_vpaddd */ &prefix_extensions[100][6],
/* OP_vpsrldq */ &prefix_extensions[101][6],
/* OP_vpslldq */ &prefix_extensions[102][6],
/* OP_vmovdqu */ &prefix_extensions[112][5],
/* OP_vmovdqa */ &prefix_extensions[112][6],
/* OP_vhaddpd */ &prefix_extensions[114][6],
/* OP_vhaddps */ &prefix_extensions[114][7],
/* OP_vhsubpd */ &prefix_extensions[115][6],
/* OP_vhsubps */ &prefix_extensions[115][7],
/* OP_vaddsubpd */ &prefix_extensions[116][6],
/* OP_vaddsubps */ &prefix_extensions[116][7],
/* OP_vlddqu */ &prefix_extensions[117][7],
/* OP_vpshufb */ &prefix_extensions[118][6],
/* OP_vphaddw */ &prefix_extensions[119][6],
/* OP_vphaddd */ &prefix_extensions[120][6],
/* OP_vphaddsw */ &prefix_extensions[121][6],
/* OP_vpmaddubsw */ &prefix_extensions[122][6],
/* OP_vphsubw */ &prefix_extensions[123][6],
/* OP_vphsubd */ &prefix_extensions[124][6],
/* OP_vphsubsw */ &prefix_extensions[125][6],
/* OP_vpsignb */ &prefix_extensions[126][6],
/* OP_vpsignw */ &prefix_extensions[127][6],
/* OP_vpsignd */ &prefix_extensions[128][6],
/* OP_vpmulhrsw */ &prefix_extensions[129][6],
/* OP_vpabsb */ &prefix_extensions[130][6],
/* OP_vpabsw */ &prefix_extensions[131][6],
/* OP_vpabsd */ &prefix_extensions[132][6],
/* OP_vpalignr */ &prefix_extensions[133][6],
/* OP_vpblendvb */ &e_vex_extensions[ 2][1],
/* OP_vblendvps */ &e_vex_extensions[ 0][1],
/* OP_vblendvpd */ &e_vex_extensions[ 1][1],
/* OP_vptest */ &e_vex_extensions[ 3][1],
/* OP_vpmovsxbw */ &e_vex_extensions[ 4][1],
/* OP_vpmovsxbd */ &e_vex_extensions[ 5][1],
/* OP_vpmovsxbq */ &e_vex_extensions[ 6][1],
/* OP_vpmovsxwd */ &e_vex_extensions[ 7][1],
/* OP_vpmovsxwq */ &e_vex_extensions[ 8][1],
/* OP_vpmovsxdq */ &e_vex_extensions[ 9][1],
/* OP_vpmuldq */ &e_vex_extensions[10][1],
/* OP_vpcmpeqq */ &e_vex_extensions[11][1],
/* OP_vmovntdqa */ &e_vex_extensions[12][1],
/* OP_vpackusdw */ &e_vex_extensions[13][1],
/* OP_vpmovzxbw */ &e_vex_extensions[14][1],
/* OP_vpmovzxbd */ &e_vex_extensions[15][1],
/* OP_vpmovzxbq */ &e_vex_extensions[16][1],
/* OP_vpmovzxwd */ &e_vex_extensions[17][1],
/* OP_vpmovzxwq */ &e_vex_extensions[18][1],
/* OP_vpmovzxdq */ &e_vex_extensions[19][1],
/* OP_vpcmpgtq */ &e_vex_extensions[20][1],
/* OP_vpminsb */ &e_vex_extensions[21][1],
/* OP_vpminsd */ &e_vex_extensions[22][1],
/* OP_vpminuw */ &e_vex_extensions[23][1],
/* OP_vpminud */ &e_vex_extensions[24][1],
/* OP_vpmaxsb */ &e_vex_extensions[25][1],
/* OP_vpmaxsd */ &e_vex_extensions[26][1],
/* OP_vpmaxuw */ &e_vex_extensions[27][1],
/* OP_vpmaxud */ &e_vex_extensions[28][1],
/* OP_vpmulld */ &e_vex_extensions[29][1],
/* OP_vphminposuw */ &e_vex_extensions[30][1],
/* OP_vaesimc */ &e_vex_extensions[31][1],
/* OP_vaesenc */ &e_vex_extensions[32][1],
/* OP_vaesenclast */ &e_vex_extensions[33][1],
/* OP_vaesdec */ &e_vex_extensions[34][1],
/* OP_vaesdeclast */ &e_vex_extensions[35][1],
/* OP_vpextrb */ &e_vex_extensions[36][1],
/* OP_vpextrd */ &e_vex_extensions[38][1],
/* OP_vextractps */ &e_vex_extensions[39][1],
/* OP_vroundps */ &e_vex_extensions[40][1],
/* OP_vroundpd */ &e_vex_extensions[41][1],
/* OP_vroundss */ &e_vex_extensions[42][1],
/* OP_vroundsd */ &e_vex_extensions[43][1],
/* OP_vblendps */ &e_vex_extensions[44][1],
/* OP_vblendpd */ &e_vex_extensions[45][1],
/* OP_vpblendw */ &e_vex_extensions[46][1],
/* OP_vpinsrb */ &e_vex_extensions[47][1],
/* OP_vinsertps */ &e_vex_extensions[48][1],
/* OP_vpinsrd */ &e_vex_extensions[49][1],
/* OP_vdpps */ &e_vex_extensions[50][1],
/* OP_vdppd */ &e_vex_extensions[51][1],
/* OP_vmpsadbw */ &e_vex_extensions[52][1],
/* OP_vpcmpestrm */ &e_vex_extensions[53][1],
/* OP_vpcmpestri */ &e_vex_extensions[54][1],
/* OP_vpcmpistrm */ &e_vex_extensions[55][1],
/* OP_vpcmpistri */ &e_vex_extensions[56][1],
/* OP_vpclmulqdq */ &e_vex_extensions[57][1],
/* OP_vaeskeygenassist*/ &e_vex_extensions[58][1],
/* OP_vtestps */ &e_vex_extensions[59][1],
/* OP_vtestpd */ &e_vex_extensions[60][1],
/* OP_vzeroupper */ &vex_L_extensions[0][1],
/* OP_vzeroall */ &vex_L_extensions[0][2],
/* OP_vldmxcsr */ &e_vex_extensions[61][1],
/* OP_vstmxcsr */ &e_vex_extensions[62][1],
/* OP_vbroadcastss */ &e_vex_extensions[64][1],
/* OP_vbroadcastsd */ &e_vex_extensions[65][1],
/* OP_vbroadcastf128*/ &e_vex_extensions[66][1],
/* OP_vmaskmovps */ &e_vex_extensions[67][1],
/* OP_vmaskmovpd */ &e_vex_extensions[68][1],
/* OP_vpermilps */ &e_vex_extensions[71][1],
/* OP_vpermilpd */ &e_vex_extensions[72][1],
/* OP_vperm2f128 */ &e_vex_extensions[73][1],
/* OP_vinsertf128 */ &e_vex_extensions[74][1],
/* OP_vextractf128 */ &e_vex_extensions[75][1],
    /* added in Intel Ivy Bridge; covered by the F16C cpuid flag */
/* OP_vcvtph2ps */ &e_vex_extensions[63][1],
/* OP_vcvtps2ph */ &e_vex_extensions[76][1],
/* FMA */
/* OP_vfmadd132ps */ &vex_W_extensions[ 0][0],
/* OP_vfmadd132pd */ &vex_W_extensions[ 0][1],
/* OP_vfmadd213ps */ &vex_W_extensions[ 1][0],
/* OP_vfmadd213pd */ &vex_W_extensions[ 1][1],
/* OP_vfmadd231ps */ &vex_W_extensions[ 2][0],
/* OP_vfmadd231pd */ &vex_W_extensions[ 2][1],
/* OP_vfmadd132ss */ &vex_W_extensions[ 3][0],
/* OP_vfmadd132sd */ &vex_W_extensions[ 3][1],
/* OP_vfmadd213ss */ &vex_W_extensions[ 4][0],
/* OP_vfmadd213sd */ &vex_W_extensions[ 4][1],
/* OP_vfmadd231ss */ &vex_W_extensions[ 5][0],
/* OP_vfmadd231sd */ &vex_W_extensions[ 5][1],
/* OP_vfmaddsub132ps*/ &vex_W_extensions[ 6][0],
/* OP_vfmaddsub132pd*/ &vex_W_extensions[ 6][1],
/* OP_vfmaddsub213ps*/ &vex_W_extensions[ 7][0],
/* OP_vfmaddsub213pd*/ &vex_W_extensions[ 7][1],
/* OP_vfmaddsub231ps*/ &vex_W_extensions[ 8][0],
/* OP_vfmaddsub231pd*/ &vex_W_extensions[ 8][1],
/* OP_vfmsubadd132ps*/ &vex_W_extensions[ 9][0],
/* OP_vfmsubadd132pd*/ &vex_W_extensions[ 9][1],
/* OP_vfmsubadd213ps*/ &vex_W_extensions[10][0],
/* OP_vfmsubadd213pd*/ &vex_W_extensions[10][1],
/* OP_vfmsubadd231ps*/ &vex_W_extensions[11][0],
/* OP_vfmsubadd231pd*/ &vex_W_extensions[11][1],
/* OP_vfmsub132ps */ &vex_W_extensions[12][0],
/* OP_vfmsub132pd */ &vex_W_extensions[12][1],
/* OP_vfmsub213ps */ &vex_W_extensions[13][0],
/* OP_vfmsub213pd */ &vex_W_extensions[13][1],
/* OP_vfmsub231ps */ &vex_W_extensions[14][0],
/* OP_vfmsub231pd */ &vex_W_extensions[14][1],
/* OP_vfmsub132ss */ &vex_W_extensions[15][0],
/* OP_vfmsub132sd */ &vex_W_extensions[15][1],
/* OP_vfmsub213ss */ &vex_W_extensions[16][0],
/* OP_vfmsub213sd */ &vex_W_extensions[16][1],
/* OP_vfmsub231ss */ &vex_W_extensions[17][0],
/* OP_vfmsub231sd */ &vex_W_extensions[17][1],
/* OP_vfnmadd132ps */ &vex_W_extensions[18][0],
/* OP_vfnmadd132pd */ &vex_W_extensions[18][1],
/* OP_vfnmadd213ps */ &vex_W_extensions[19][0],
/* OP_vfnmadd213pd */ &vex_W_extensions[19][1],
/* OP_vfnmadd231ps */ &vex_W_extensions[20][0],
/* OP_vfnmadd231pd */ &vex_W_extensions[20][1],
/* OP_vfnmadd132ss */ &vex_W_extensions[21][0],
/* OP_vfnmadd132sd */ &vex_W_extensions[21][1],
/* OP_vfnmadd213ss */ &vex_W_extensions[22][0],
/* OP_vfnmadd213sd */ &vex_W_extensions[22][1],
/* OP_vfnmadd231ss */ &vex_W_extensions[23][0],
/* OP_vfnmadd231sd */ &vex_W_extensions[23][1],
/* OP_vfnmsub132ps */ &vex_W_extensions[24][0],
/* OP_vfnmsub132pd */ &vex_W_extensions[24][1],
/* OP_vfnmsub213ps */ &vex_W_extensions[25][0],
/* OP_vfnmsub213pd */ &vex_W_extensions[25][1],
/* OP_vfnmsub231ps */ &vex_W_extensions[26][0],
/* OP_vfnmsub231pd */ &vex_W_extensions[26][1],
/* OP_vfnmsub132ss */ &vex_W_extensions[27][0],
/* OP_vfnmsub132sd */ &vex_W_extensions[27][1],
/* OP_vfnmsub213ss */ &vex_W_extensions[28][0],
/* OP_vfnmsub213sd */ &vex_W_extensions[28][1],
/* OP_vfnmsub231ss */ &vex_W_extensions[29][0],
/* OP_vfnmsub231sd */ &vex_W_extensions[29][1],
    /* SSE2 instructions that were omitted before */
/* OP_movq2dq */ &prefix_extensions[61][1],
/* OP_movdq2q */ &prefix_extensions[61][3],
/* OP_fxsave64 */ &rex_w_extensions[0][1],
/* OP_fxrstor64 */ &rex_w_extensions[1][1],
/* OP_xsave64 */ &rex_w_extensions[2][1],
/* OP_xrstor64 */ &rex_w_extensions[3][1],
/* OP_xsaveopt64 */ &rex_w_extensions[4][1],
/* added in Intel Ivy Bridge: RDRAND and FSGSBASE cpuid flags */
/* OP_rdrand */ &mod_extensions[12][1],
/* OP_rdfsbase */ &mod_extensions[14][1],
/* OP_rdgsbase */ &mod_extensions[15][1],
/* OP_wrfsbase */ &mod_extensions[16][1],
/* OP_wrgsbase */ &mod_extensions[17][1],
/* coming in the future but adding now since enough details are known */
/* OP_rdseed */ &mod_extensions[13][1],
/* AMD FMA4 */
/* OP_vfmaddsubps */ &vex_W_extensions[30][0],
/* OP_vfmaddsubpd */ &vex_W_extensions[31][0],
/* OP_vfmsubaddps */ &vex_W_extensions[32][0],
/* OP_vfmsubaddpd */ &vex_W_extensions[33][0],
/* OP_vfmaddps */ &vex_W_extensions[34][0],
/* OP_vfmaddpd */ &vex_W_extensions[35][0],
/* OP_vfmaddss */ &vex_W_extensions[36][0],
/* OP_vfmaddsd */ &vex_W_extensions[37][0],
/* OP_vfmsubps */ &vex_W_extensions[38][0],
/* OP_vfmsubpd */ &vex_W_extensions[39][0],
/* OP_vfmsubss */ &vex_W_extensions[40][0],
/* OP_vfmsubsd */ &vex_W_extensions[41][0],
/* OP_vfnmaddps */ &vex_W_extensions[42][0],
/* OP_vfnmaddpd */ &vex_W_extensions[43][0],
/* OP_vfnmaddss */ &vex_W_extensions[44][0],
/* OP_vfnmaddsd */ &vex_W_extensions[45][0],
/* OP_vfnmsubps */ &vex_W_extensions[46][0],
/* OP_vfnmsubpd */ &vex_W_extensions[47][0],
/* OP_vfnmsubss */ &vex_W_extensions[48][0],
/* OP_vfnmsubsd */ &vex_W_extensions[49][0],
/* AMD XOP */
/* OP_vfrczps */ &xop_extensions[27],
/* OP_vfrczpd */ &xop_extensions[28],
/* OP_vfrczss */ &xop_extensions[29],
/* OP_vfrczsd */ &xop_extensions[30],
/* OP_vpcmov */ &vex_W_extensions[50][0],
/* OP_vpcomb */ &xop_extensions[19],
/* OP_vpcomw */ &xop_extensions[20],
/* OP_vpcomd */ &xop_extensions[21],
/* OP_vpcomq */ &xop_extensions[22],
/* OP_vpcomub */ &xop_extensions[23],
/* OP_vpcomuw */ &xop_extensions[24],
/* OP_vpcomud */ &xop_extensions[25],
/* OP_vpcomuq */ &xop_extensions[26],
/* OP_vpermil2pd */ &vex_W_extensions[65][0],
/* OP_vpermil2ps */ &vex_W_extensions[64][0],
/* OP_vphaddbw */ &xop_extensions[43],
/* OP_vphaddbd */ &xop_extensions[44],
/* OP_vphaddbq */ &xop_extensions[45],
/* OP_vphaddwd */ &xop_extensions[46],
/* OP_vphaddwq */ &xop_extensions[47],
/* OP_vphadddq */ &xop_extensions[48],
/* OP_vphaddubw */ &xop_extensions[49],
/* OP_vphaddubd */ &xop_extensions[50],
/* OP_vphaddubq */ &xop_extensions[51],
/* OP_vphadduwd */ &xop_extensions[52],
/* OP_vphadduwq */ &xop_extensions[53],
/* OP_vphaddudq */ &xop_extensions[54],
/* OP_vphsubbw */ &xop_extensions[55],
/* OP_vphsubwd */ &xop_extensions[56],
/* OP_vphsubdq */ &xop_extensions[57],
/* OP_vpmacssww */ &xop_extensions[ 1],
/* OP_vpmacsswd */ &xop_extensions[ 2],
/* OP_vpmacssdql */ &xop_extensions[ 3],
/* OP_vpmacssdd */ &xop_extensions[ 4],
/* OP_vpmacssdqh */ &xop_extensions[ 5],
/* OP_vpmacsww */ &xop_extensions[ 6],
/* OP_vpmacswd */ &xop_extensions[ 7],
/* OP_vpmacsdql */ &xop_extensions[ 8],
/* OP_vpmacsdd */ &xop_extensions[ 9],
/* OP_vpmacsdqh */ &xop_extensions[10],
/* OP_vpmadcsswd */ &xop_extensions[13],
/* OP_vpmadcswd */ &xop_extensions[14],
/* OP_vpperm */ &vex_W_extensions[51][0],
/* OP_vprotb */ &xop_extensions[15],
/* OP_vprotw */ &xop_extensions[16],
/* OP_vprotd */ &xop_extensions[17],
/* OP_vprotq */ &xop_extensions[18],
/* OP_vpshlb */ &vex_W_extensions[56][0],
/* OP_vpshlw */ &vex_W_extensions[57][0],
/* OP_vpshld */ &vex_W_extensions[58][0],
/* OP_vpshlq */ &vex_W_extensions[59][0],
/* OP_vpshab */ &vex_W_extensions[60][0],
/* OP_vpshaw */ &vex_W_extensions[61][0],
/* OP_vpshad */ &vex_W_extensions[62][0],
/* OP_vpshaq */ &vex_W_extensions[63][0],
/* AMD TBM */
/* OP_bextr */ &prefix_extensions[141][4],
/* OP_blcfill */ &base_extensions[27][1],
/* OP_blci */ &base_extensions[28][6],
/* OP_blcic */ &base_extensions[27][5],
/* OP_blcmsk */ &base_extensions[28][1],
/* OP_blcs */ &base_extensions[27][3],
/* OP_blsfill */ &base_extensions[27][2],
/* OP_blsic */ &base_extensions[27][6],
/* OP_t1mskc */ &base_extensions[27][7],
/* OP_tzmsk */ &base_extensions[27][4],
/* AMD LWP */
/* OP_llwpcb */ &base_extensions[29][0],
/* OP_slwpcb */ &base_extensions[29][1],
/* OP_lwpins */ &base_extensions[30][0],
/* OP_lwpval */ &base_extensions[30][1],
/* Intel BMI1 */
/* (includes non-immed form of OP_bextr) */
/* OP_andn */ &third_byte_38[100],
/* OP_blsr */ &base_extensions[31][1],
/* OP_blsmsk */ &base_extensions[31][2],
/* OP_blsi */ &base_extensions[31][3],
/* OP_tzcnt */ &prefix_extensions[140][1],
/* Intel BMI2 */
/* OP_bzhi */ &prefix_extensions[142][4],
/* OP_pext */ &prefix_extensions[142][6],
/* OP_pdep */ &prefix_extensions[142][7],
/* OP_sarx */ &prefix_extensions[141][5],
/* OP_shlx */ &prefix_extensions[141][6],
/* OP_shrx */ &prefix_extensions[141][7],
/* OP_rorx */ &third_byte_3a[56],
/* OP_mulx */ &prefix_extensions[143][7],
/* Intel Safer Mode Extensions */
/* OP_getsec */ &second_byte[0x37],
/* Misc Intel additions */
/* OP_vmfunc */ &rm_extensions[4][4],
/* OP_invpcid */ &third_byte_38[103],
/* Intel TSX */
/* OP_xabort */ &base_extensions[17][7],
/* OP_xbegin */ &base_extensions[18][7],
/* OP_xend */ &rm_extensions[4][5],
/* OP_xtest */ &rm_extensions[4][6],
/* AVX2 */
/* OP_vpgatherdd */ &vex_W_extensions[66][0],
/* OP_vpgatherdq */ &vex_W_extensions[66][1],
/* OP_vpgatherqd */ &vex_W_extensions[67][0],
/* OP_vpgatherqq */ &vex_W_extensions[67][1],
/* OP_vgatherdps */ &vex_W_extensions[68][0],
/* OP_vgatherdpd */ &vex_W_extensions[68][1],
/* OP_vgatherqps */ &vex_W_extensions[69][0],
/* OP_vgatherqpd */ &vex_W_extensions[69][1],
/* OP_vbroadcasti128 */ &third_byte_38[108],
/* OP_vinserti128 */ &third_byte_3a[57],
/* OP_vextracti128 */ &third_byte_3a[58],
/* OP_vpmaskmovd */ &vex_W_extensions[70][0],
/* OP_vpmaskmovq */ &vex_W_extensions[70][1],
/* OP_vperm2i128 */ &third_byte_3a[62],
/* OP_vpermd */ &third_byte_38[112],
/* OP_vpermps */ &third_byte_38[111],
/* OP_vpermq */ &third_byte_3a[59],
/* OP_vpermpd */ &third_byte_3a[60],
/* OP_vpblendd */ &third_byte_3a[61],
/* OP_vpsllvd */ &vex_W_extensions[73][0],
/* OP_vpsllvq */ &vex_W_extensions[73][1],
/* OP_vpsravd */ &third_byte_38[114],
/* OP_vpsrlvd */ &vex_W_extensions[72][0],
/* OP_vpsrlvq */ &vex_W_extensions[72][1],
/* OP_vpbroadcastb */ &third_byte_38[116],
/* OP_vpbroadcastw */ &third_byte_38[117],
/* OP_vpbroadcastd */ &third_byte_38[118],
/* OP_vpbroadcastq */ &third_byte_38[119],
/* added in Intel Skylake */
/* OP_xsavec32 */ &rex_w_extensions[5][0],
/* OP_xsavec64 */ &rex_w_extensions[5][1],
/* Intel ADX */
/* OP_adox */ &prefix_extensions[143][1],
/* OP_adcx */ &prefix_extensions[143][2],
/* AVX-512 VEX encoded (scalar opmask instructions) */
/* OP_kmovw */ &vex_W_extensions[74][0],
/* OP_kmovb */ &vex_W_extensions[75][0],
/* OP_kmovq */ &vex_W_extensions[74][1],
/* OP_kmovd */ &vex_W_extensions[75][1],
/* OP_kandw */ &vex_W_extensions[82][0],
/* OP_kandb */ &vex_W_extensions[83][0],
/* OP_kandq */ &vex_W_extensions[82][1],
/* OP_kandd */ &vex_W_extensions[83][1],
/* OP_kandnw */ &vex_W_extensions[84][0],
/* OP_kandnb */ &vex_W_extensions[85][0],
/* OP_kandnq */ &vex_W_extensions[84][1],
/* OP_kandnd */ &vex_W_extensions[85][1],
/* OP_kunpckbw */ &vex_W_extensions[87][0],
/* OP_kunpckwd */ &vex_W_extensions[86][0],
/* OP_kunpckdq */ &vex_W_extensions[86][1],
/* OP_knotw */ &vex_W_extensions[88][0],
/* OP_knotb */ &vex_W_extensions[89][0],
/* OP_knotq */ &vex_W_extensions[88][1],
/* OP_knotd */ &vex_W_extensions[89][1],
/* OP_korw */ &vex_W_extensions[90][0],
/* OP_korb */ &vex_W_extensions[91][0],
/* OP_korq */ &vex_W_extensions[90][1],
/* OP_kord */ &vex_W_extensions[91][1],
/* OP_kxnorw */ &vex_W_extensions[92][0],
/* OP_kxnorb */ &vex_W_extensions[93][0],
/* OP_kxnorq */ &vex_W_extensions[92][1],
/* OP_kxnord */ &vex_W_extensions[93][1],
/* OP_kxorw */ &vex_W_extensions[94][0],
/* OP_kxorb */ &vex_W_extensions[95][0],
/* OP_kxorq */ &vex_W_extensions[94][1],
/* OP_kxord */ &vex_W_extensions[95][1],
/* OP_kaddw */ &vex_W_extensions[96][0],
/* OP_kaddb */ &vex_W_extensions[97][0],
/* OP_kaddq */ &vex_W_extensions[96][1],
/* OP_kaddd */ &vex_W_extensions[97][1],
/* OP_kortestw */ &vex_W_extensions[98][0],
/* OP_kortestb */ &vex_W_extensions[99][0],
/* OP_kortestq */ &vex_W_extensions[98][1],
/* OP_kortestd */ &vex_W_extensions[99][1],
/* OP_kshiftlw */ &vex_W_extensions[100][1],
/* OP_kshiftlb */ &vex_W_extensions[100][0],
/* OP_kshiftlq */ &vex_W_extensions[101][1],
/* OP_kshiftld */ &vex_W_extensions[101][0],
/* OP_kshiftrw */ &vex_W_extensions[102][1],
/* OP_kshiftrb */ &vex_W_extensions[102][0],
/* OP_kshiftrq */ &vex_W_extensions[103][1],
/* OP_kshiftrd */ &vex_W_extensions[103][0],
/* OP_ktestw */ &vex_W_extensions[104][0],
/* OP_ktestb */ &vex_W_extensions[105][0],
/* OP_ktestq */ &vex_W_extensions[104][1],
/* OP_ktestd */ &vex_W_extensions[105][1],
/* AVX-512 EVEX encoded */
/* OP_vmovdqa32 */ &evex_W_extensions[8][0],
/* OP_vmovdqa64 */ &evex_W_extensions[8][1],
/* OP_vmovdqu8 */ &evex_W_extensions[10][0],
/* OP_vmovdqu16 */ &evex_W_extensions[10][1],
/* OP_vmovdqu32 */ &evex_W_extensions[11][0],
/* OP_vmovdqu64 */ &evex_W_extensions[11][1],
/* TODO i#1312. */
};
/****************************************************************************
* Macros to make tables legible
*/
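/* Each operand macro below expands to a comma-separated pair: an operand type
 * constant (TYPE_*) followed by either a size constant (OPSZ_*) or, for fixed
 * operands, a REG_/SEG_ value.  One short name thus supplies both fields of a
 * table operand, keeping each table row on a single line; see decode.h for the
 * meanings of the individual TYPE_ and OPSZ_ constants.
 */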
/* Jb is defined in dynamo.h, undefine it for this file */
#undef Jb
#define xx TYPE_NONE, OPSZ_NA
/* from Intel tables, using our corresponding OPSZ constants */
#define Ap TYPE_A, OPSZ_6_irex10_short4 /* NOTE - not legal for 64-bit instructions */
#define By TYPE_B, OPSZ_4_rex8
#define Cr TYPE_C, OPSZ_4x8
#define Dr TYPE_D, OPSZ_4x8
#define Eb TYPE_E, OPSZ_1
#define Ew TYPE_E, OPSZ_2
#define Ev TYPE_E, OPSZ_4_rex8_short2
#define Esv TYPE_E, OPSZ_4x8_short2 /* "stack v", or "d64" in Intel tables */
#define Ed TYPE_E, OPSZ_4
#define Ep TYPE_E, OPSZ_6_irex10_short4
#define Ed_q TYPE_E, OPSZ_4_rex8
#define Ey TYPE_E, OPSZ_4_rex8
#define Rd_Mb TYPE_E, OPSZ_1_reg4
#define Rd_Mw TYPE_E, OPSZ_2_reg4
#define Gb TYPE_G, OPSZ_1
#define Gw TYPE_G, OPSZ_2
#define Gv TYPE_G, OPSZ_4_rex8_short2
#define Gz TYPE_G, OPSZ_4_short2
#define Gd TYPE_G, OPSZ_4
#define Gd_q TYPE_G, OPSZ_4_rex8
#define Gr TYPE_G, OPSZ_4x8
#define Gy TYPE_G, OPSZ_4_rex8
#define Ib TYPE_I, OPSZ_1
#define Iw TYPE_I, OPSZ_2
#define Id TYPE_I, OPSZ_4
#define Iv TYPE_I, OPSZ_4_rex8_short2
#define Iz TYPE_I, OPSZ_4_short2
#define Jb TYPE_J, OPSZ_1
#define Jz TYPE_J, OPSZ_4_short2xi4
#define Ma TYPE_M, OPSZ_8_short4
#define Mp TYPE_M, OPSZ_6_irex10_short4
#define Ms TYPE_M, OPSZ_6x10
#define Ob TYPE_O, OPSZ_1
#define Ov TYPE_O, OPSZ_4_rex8_short2
#define Pd TYPE_P, OPSZ_4
#define Pq TYPE_P, OPSZ_8
#define Pw_q TYPE_P, OPSZ_2_of_8
#define Pd_q TYPE_P, OPSZ_4_of_8
#define Ppi TYPE_P, OPSZ_8
#define Nw_q TYPE_P_MODRM, OPSZ_2_of_8
#define Nq TYPE_P_MODRM, OPSZ_8
#define Qd TYPE_Q, OPSZ_4
#define Qq TYPE_Q, OPSZ_8
#define Qpi TYPE_Q, OPSZ_8
#define Rr TYPE_R, OPSZ_4x8
#define Rv TYPE_R, OPSZ_4_rex8_short2
#define Ry TYPE_R, OPSZ_4_rex8
#define Sw TYPE_S, OPSZ_2
#define Vq TYPE_V, OPSZ_8
#define Vdq TYPE_V, OPSZ_16
#define Vb_dq TYPE_V, OPSZ_1_of_16
#define Vw_dq TYPE_V, OPSZ_2_of_16
#define Vd_dq TYPE_V, OPSZ_4_of_16
#define Vd_q_dq TYPE_V, OPSZ_4_rex8_of_16
#define Vq_dq TYPE_V, OPSZ_8_of_16
#define Vps TYPE_V, OPSZ_16
#define Vpd TYPE_V, OPSZ_16
#define Vss TYPE_V, OPSZ_4_of_16
#define Vsd TYPE_V, OPSZ_8_of_16
#define Ups TYPE_V_MODRM, OPSZ_16
#define Upd TYPE_V_MODRM, OPSZ_16
#define Udq TYPE_V_MODRM, OPSZ_16
#define Uw_dq TYPE_V_MODRM, OPSZ_2_of_16
#define Uq_dq TYPE_V_MODRM, OPSZ_8_of_16
#define Wq TYPE_W, OPSZ_8
#define Wdq TYPE_W, OPSZ_16
#define Wb_dq TYPE_W, OPSZ_1_of_16
#define Ww_dq TYPE_W, OPSZ_2_of_16
#define Wd_dq TYPE_W, OPSZ_4_of_16
#define Wq_dq TYPE_W, OPSZ_8_of_16
#define Wps TYPE_W, OPSZ_16
#define Wpd TYPE_W, OPSZ_16
#define Wss TYPE_W, OPSZ_4_of_16
#define Wsd TYPE_W, OPSZ_8_of_16
#define Udq_Md TYPE_W, OPSZ_4_reg16
#define Xb TYPE_X, OPSZ_1
#define Xv TYPE_X, OPSZ_4_rex8_short2
#define Xz TYPE_X, OPSZ_4_short2
#define Yb TYPE_Y, OPSZ_1
#define Yv TYPE_Y, OPSZ_4_rex8_short2
#define Yz TYPE_Y, OPSZ_4_short2
/* AVX additions */
#define Vvs TYPE_V, OPSZ_16_vex32
#define Vvd TYPE_V, OPSZ_16_vex32
#define Vx TYPE_V, OPSZ_16_vex32
#define Vqq TYPE_V, OPSZ_32
#define Vdq_qq TYPE_V, OPSZ_16_of_32
#define Wvs TYPE_W, OPSZ_16_vex32
#define Wvd TYPE_W, OPSZ_16_vex32
#define Wx TYPE_W, OPSZ_16_vex32
#define Uvs TYPE_V_MODRM, OPSZ_16_vex32
#define Uvd TYPE_V_MODRM, OPSZ_16_vex32
#define Uss TYPE_V_MODRM, OPSZ_4_of_16
#define Usd TYPE_V_MODRM, OPSZ_8_of_16
#define Ux TYPE_V_MODRM, OPSZ_16_vex32
#define Udq TYPE_V_MODRM, OPSZ_16
#define Hvs TYPE_H, OPSZ_16_vex32
#define Hvd TYPE_H, OPSZ_16_vex32
#define Hss TYPE_H, OPSZ_4_of_16
#define Hsd TYPE_H, OPSZ_8_of_16
#define Hq_dq TYPE_H, OPSZ_8_of_16
#define Hdq TYPE_H, OPSZ_16
#define H12_dq TYPE_H, OPSZ_12_of_16
#define H12_8_dq TYPE_H, OPSZ_12_rex8_of_16
#define H14_dq TYPE_H, OPSZ_14_of_16
#define H15_dq TYPE_H, OPSZ_15_of_16
#define Hqq TYPE_H, OPSZ_32
#define Hx TYPE_H, OPSZ_16_vex32
#define Hh_x TYPE_H, OPSZ_half_16_vex32
#define Wvq_dq TYPE_W, OPSZ_8_of_16_vex32
#define Wh_x TYPE_W, OPSZ_half_16_vex32
#define Wqq TYPE_W, OPSZ_32
#define Mvs TYPE_M, OPSZ_16_vex32
#define Mvd TYPE_M, OPSZ_16_vex32
#define Mx TYPE_M, OPSZ_16_vex32
#define Ldq TYPE_L, OPSZ_16 /* immed is 1 byte but reg is xmm */
#define Lx TYPE_L, OPSZ_16_vex32 /* immed is 1 byte but reg is xmm/ymm */
#define Lvs TYPE_L, OPSZ_16_vex32 /* immed is 1 byte but reg is xmm/ymm */
#define Lss TYPE_L, OPSZ_4_of_16 /* immed is 1 byte but reg is xmm/ymm */
#define Lsd TYPE_L, OPSZ_8_of_16 /* immed is 1 byte but reg is xmm/ymm */
/* AVX-512 additions */
#define KPb TYPE_K_REG, OPSZ_1
#define KPw TYPE_K_REG, OPSZ_2
#define KPd TYPE_K_REG, OPSZ_4
#define KPq TYPE_K_REG, OPSZ_8
#define KRb TYPE_K_MODRM_R, OPSZ_1
#define KRw TYPE_K_MODRM_R, OPSZ_2
#define KRd TYPE_K_MODRM_R, OPSZ_4
#define KRq TYPE_K_MODRM_R, OPSZ_8
#define KQb TYPE_K_MODRM, OPSZ_1
#define KQw TYPE_K_MODRM, OPSZ_2
#define KQd TYPE_K_MODRM, OPSZ_4
#define KQq TYPE_K_MODRM, OPSZ_8
#define KVb TYPE_K_VEX, OPSZ_1
#define KVw TYPE_K_VEX, OPSZ_2
#define KVd TYPE_K_VEX, OPSZ_4
#define KVq TYPE_K_VEX, OPSZ_8
#define KEb TYPE_K_EVEX, OPSZ_1
#define KEw TYPE_K_EVEX, OPSZ_2
#define KEd TYPE_K_EVEX, OPSZ_4
#define KEq TYPE_K_EVEX, OPSZ_8
#define Ves TYPE_V, OPSZ_16_vex32_evex64
#define Ved TYPE_V, OPSZ_16_vex32_evex64
#define Wes TYPE_W, OPSZ_16_vex32_evex64
#define Wed TYPE_W, OPSZ_16_vex32_evex64
#define Vex TYPE_V, OPSZ_16_vex32_evex64
#define Wex TYPE_W, OPSZ_16_vex32_evex64
/* my own codes
* size m = 32 or 16 bit depending on addr size attribute
* B=ds:eDI, Z=xlat's mem, K=float in mem, i_==indirect
*/
#define Mb TYPE_M, OPSZ_1
#define Md TYPE_M, OPSZ_4
#define Md_q TYPE_M, OPSZ_4_rex8
#define Mw TYPE_M, OPSZ_2
#define Mm TYPE_M, OPSZ_lea
#define Me TYPE_M, OPSZ_512
#define Mxsave TYPE_M, OPSZ_xsave
#define Mps TYPE_M, OPSZ_16
#define Mpd TYPE_M, OPSZ_16
#define Mss TYPE_M, OPSZ_4
#define Msd TYPE_M, OPSZ_8
#define Mq TYPE_M, OPSZ_8
#define Mdq TYPE_M, OPSZ_16
#define Mq_dq TYPE_M, OPSZ_8_rex16
#define Mv TYPE_M, OPSZ_4_rex8_short2
#define MVd TYPE_VSIB, OPSZ_4
#define MVq TYPE_VSIB, OPSZ_8
#define Zb TYPE_XLAT, OPSZ_1
#define Bq TYPE_MASKMOVQ, OPSZ_8
#define Bdq TYPE_MASKMOVQ, OPSZ_16
#define Fw TYPE_FLOATMEM, OPSZ_2
#define Fd TYPE_FLOATMEM, OPSZ_4
#define Fq TYPE_FLOATMEM, OPSZ_8
#define Fx TYPE_FLOATMEM, OPSZ_10
#define Fy TYPE_FLOATMEM, OPSZ_28_short14 /* _14_ if data16 */
#define Fz TYPE_FLOATMEM, OPSZ_108_short94 /* _94_ if data16 */
#define i_dx TYPE_INDIR_REG, REG_DX
#define i_Ev TYPE_INDIR_E, OPSZ_4_rex8_short2
#define i_Exi TYPE_INDIR_E, OPSZ_4x8_short2xi8
#define i_Ep TYPE_INDIR_E, OPSZ_6_irex10_short4
#define i_xSP TYPE_INDIR_VAR_XREG, REG_ESP
#define i_iSP TYPE_INDIR_VAR_XIREG, REG_ESP
#define i_xBP TYPE_INDIR_VAR_XREG, REG_EBP
/* negative offset from (%xsp) for pushes */
#define i_iSPo1 TYPE_INDIR_VAR_XIREG_OFFS_1, REG_ESP
#define i_vSPo2 TYPE_INDIR_VAR_REG_OFFS_2, REG_ESP
#define i_xSPo1 TYPE_INDIR_VAR_XREG_OFFS_1, REG_ESP
#define i_xSPo8 TYPE_INDIR_VAR_XREG_OFFS_8, REG_ESP
#define i_xSPs8 TYPE_INDIR_VAR_XREG_SIZEx8, REG_ESP
#define i_vSPs2 TYPE_INDIR_VAR_REG_SIZEx2, REG_ESP
#define i_vSPs3 TYPE_INDIR_VAR_REG_SIZEx3x5, REG_ESP
/* pop but unusual size */
#define i_xSPoN TYPE_INDIR_VAR_XREG_OFFS_N, REG_ESP
#define c1 TYPE_1, OPSZ_0
/* we pick the right constant based on the opcode */
#define cF TYPE_FLOATCONST, OPSZ_0
/* registers that are base 32 but vary down or up */
#define eAX TYPE_VAR_REG, REG_EAX
#define eCX TYPE_VAR_REG, REG_ECX
#define eDX TYPE_VAR_REG, REG_EDX
#define eBX TYPE_VAR_REG, REG_EBX
#define eSP TYPE_VAR_REG, REG_ESP
#define eBP TYPE_VAR_REG, REG_EBP
#define eSI TYPE_VAR_REG, REG_ESI
#define eDI TYPE_VAR_REG, REG_EDI
/* registers that are base 32 and can vary down but not up */
#define zAX TYPE_VARZ_REG, REG_EAX
#define zCX TYPE_VARZ_REG, REG_ECX
#define zDX TYPE_VARZ_REG, REG_EDX
#define zBX TYPE_VARZ_REG, REG_EBX
#define zSP TYPE_VARZ_REG, REG_ESP
#define zBP TYPE_VARZ_REG, REG_EBP
#define zSI TYPE_VARZ_REG, REG_ESI
#define zDI TYPE_VARZ_REG, REG_EDI
/* registers whose base matches the mode, and can vary down but not up.
* we use the 32-bit versions but expand in resolve_var_reg()
*/
#define xAX TYPE_VAR_XREG, REG_EAX
#define xCX TYPE_VAR_XREG, REG_ECX
#define xDX TYPE_VAR_XREG, REG_EDX
#define xBX TYPE_VAR_XREG, REG_EBX
#define xSP TYPE_VAR_XREG, REG_ESP
#define xBP TYPE_VAR_XREG, REG_EBP
#define xSI TYPE_VAR_XREG, REG_ESI
#define xDI TYPE_VAR_XREG, REG_EDI
/* jecxz and loop* vary by addr16 */
#define axCX TYPE_VAR_ADDR_XREG, REG_ECX
/* string ops also use addr16 */
#define axSI TYPE_VAR_ADDR_XREG, REG_ESI
#define axDI TYPE_VAR_ADDR_XREG, REG_EDI
#define axAX TYPE_VAR_ADDR_XREG, REG_EAX
/* 8-bit implicit registers (not from modrm) that can be extended via rex.b */
#define al_x TYPE_REG_EX, REG_AL
#define cl_x TYPE_REG_EX, REG_CL
#define dl_x TYPE_REG_EX, REG_DL
#define bl_x TYPE_REG_EX, REG_BL
#define ah_x TYPE_REG_EX, REG_AH
#define ch_x TYPE_REG_EX, REG_CH
#define dh_x TYPE_REG_EX, REG_DH
#define bh_x TYPE_REG_EX, REG_BH
/* 4_rex8_short2 implicit registers (not from modrm) that can be extended via rex.b */
#define eAX_x TYPE_VAR_REG_EX, REG_EAX
#define eCX_x TYPE_VAR_REG_EX, REG_ECX
#define eDX_x TYPE_VAR_REG_EX, REG_EDX
#define eBX_x TYPE_VAR_REG_EX, REG_EBX
#define eSP_x TYPE_VAR_REG_EX, REG_ESP
#define eBP_x TYPE_VAR_REG_EX, REG_EBP
#define eSI_x TYPE_VAR_REG_EX, REG_ESI
#define eDI_x TYPE_VAR_REG_EX, REG_EDI
/* 4x8_short2 implicit registers (not from modrm) that can be extended via rex.b */
#define xAX_x TYPE_VAR_XREG_EX, REG_EAX
#define xCX_x TYPE_VAR_XREG_EX, REG_ECX
#define xDX_x TYPE_VAR_XREG_EX, REG_EDX
#define xBX_x TYPE_VAR_XREG_EX, REG_EBX
#define xSP_x TYPE_VAR_XREG_EX, REG_ESP
#define xBP_x TYPE_VAR_XREG_EX, REG_EBP
#define xSI_x TYPE_VAR_XREG_EX, REG_ESI
#define xDI_x TYPE_VAR_XREG_EX, REG_EDI
/* 4_rex8 implicit registers (not from modrm) that can be extended via rex.b */
#define uAX_x TYPE_VAR_REGX_EX, REG_EAX
#define uCX_x TYPE_VAR_REGX_EX, REG_ECX
#define uDX_x TYPE_VAR_REGX_EX, REG_EDX
#define uBX_x TYPE_VAR_REGX_EX, REG_EBX
#define uSP_x TYPE_VAR_REGX_EX, REG_ESP
#define uBP_x TYPE_VAR_REGX_EX, REG_EBP
#define uSI_x TYPE_VAR_REGX_EX, REG_ESI
#define uDI_x TYPE_VAR_REGX_EX, REG_EDI
/* 4_rex8 implicit registers (not from modrm) */
#define uDX TYPE_VAR_REGX, REG_EDX
#define ax TYPE_REG, REG_AX
#define cx TYPE_REG, REG_CX
#define dx TYPE_REG, REG_DX
#define bx TYPE_REG, REG_BX
#define sp TYPE_REG, REG_SP
#define bp TYPE_REG, REG_BP
#define si TYPE_REG, REG_SI
#define di TYPE_REG, REG_DI
#define al TYPE_REG, REG_AL
#define cl TYPE_REG, REG_CL
#define dl TYPE_REG, REG_DL
#define bl TYPE_REG, REG_BL
#define ah TYPE_REG, REG_AH
#define ch TYPE_REG, REG_CH
#define dh TYPE_REG, REG_DH
#define bh TYPE_REG, REG_BH
#define eax TYPE_REG, REG_EAX
#define ecx TYPE_REG, REG_ECX
#define edx TYPE_REG, REG_EDX
#define ebx TYPE_REG, REG_EBX
#define esp TYPE_REG, REG_ESP
#define ebp TYPE_REG, REG_EBP
#define esi TYPE_REG, REG_ESI
#define edi TYPE_REG, REG_EDI
#define xsp TYPE_XREG, REG_ESP
#define xbp TYPE_XREG, REG_EBP
#define xcx TYPE_XREG, REG_ECX
#define cs TYPE_REG, SEG_CS
#define ss TYPE_REG, SEG_SS
#define ds TYPE_REG, SEG_DS
#define es TYPE_REG, SEG_ES
#define fs TYPE_REG, SEG_FS
#define gs TYPE_REG, SEG_GS
#define st0 TYPE_REG, REG_ST0
#define st1 TYPE_REG, REG_ST1
#define st2 TYPE_REG, REG_ST2
#define st3 TYPE_REG, REG_ST3
#define st4 TYPE_REG, REG_ST4
#define st5 TYPE_REG, REG_ST5
#define st6 TYPE_REG, REG_ST6
#define st7 TYPE_REG, REG_ST7
#define xmm0 TYPE_REG, REG_XMM0
/* flags */
#define no 0
#define mrm HAS_MODRM
#define xop (HAS_EXTRA_OPERANDS|EXTRAS_IN_CODE_FIELD)
#define mrm_xop (HAS_MODRM|HAS_EXTRA_OPERANDS|EXTRAS_IN_CODE_FIELD)
#define xop_next (HAS_EXTRA_OPERANDS)
#define i64 X64_INVALID
#define o64 X86_INVALID
#define reqp REQUIRES_PREFIX
#define vex REQUIRES_VEX
#define rex REQUIRES_REX
#define reqL0 REQUIRES_VEX_L_0
#define reqL1 REQUIRES_VEX_L_1
#define predcc HAS_PRED_CC
#define predcx HAS_PRED_COMPLEX
#define evex REQUIRES_EVEX
#define reqLL0 REQUIRES_EVEX_LL_0
#define reqLL1 REQUIRES_EVEX_LL_1
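/* The most common of these in the tables below: mrm = the encoding has a modrm
 * byte; i64 (X64_INVALID) = the encoding does not exist in 64-bit mode (push es,
 * daa, etc.); o64 (X86_INVALID) = the reverse, a 64-bit-only encoding; reqp = a
 * mandatory prefix is required (e.g. the f3 on popcnt); predcc = execution is
 * predicated on the condition code (see the cmovcc entries).
 */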
/* eflags */
#define x 0
#define fRC EFLAGS_READ_CF
#define fRP EFLAGS_READ_PF
#define fRA EFLAGS_READ_AF
#define fRZ EFLAGS_READ_ZF
#define fRS EFLAGS_READ_SF
#define fRT EFLAGS_READ_TF
#define fRI EFLAGS_READ_IF
#define fRD EFLAGS_READ_DF
#define fRO EFLAGS_READ_OF
#define fRN EFLAGS_READ_NT
#define fRR EFLAGS_READ_RF
#define fRX EFLAGS_READ_ALL
#define fR6 EFLAGS_READ_6
#define fWC EFLAGS_WRITE_CF
#define fWP EFLAGS_WRITE_PF
#define fWA EFLAGS_WRITE_AF
#define fWZ EFLAGS_WRITE_ZF
#define fWS EFLAGS_WRITE_SF
#define fWT EFLAGS_WRITE_TF
#define fWI EFLAGS_WRITE_IF
#define fWD EFLAGS_WRITE_DF
#define fWO EFLAGS_WRITE_OF
#define fWN EFLAGS_WRITE_NT
#define fWR EFLAGS_WRITE_RF
#define fWX EFLAGS_WRITE_ALL
#define fW6 EFLAGS_WRITE_6
/* flags affected by OP_int*
* FIXME: should we add AC and VM flags?
*/
#define fINT (fRX|fWT|fWN|fWI|fWR)
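/* fW6/fR6 below cover the six arithmetic status flags (CF, PF, AF, ZF, SF, OF);
 * individual flags are masked off where needed, e.g. (fW6&(~fWC)) on inc/dec
 * for "writes all status flags except CF".
 */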
/* for constructing linked lists of table entries */
#define NA 0
#define END_LIST 0
#define tfb (ptr_int_t)&first_byte
#define tsb (ptr_int_t)&second_byte
#define tex (ptr_int_t)&base_extensions
#define t38 (ptr_int_t)&third_byte_38
#define t3a (ptr_int_t)&third_byte_3a
#define tpe (ptr_int_t)&prefix_extensions
#define tvex (ptr_int_t)&e_vex_extensions
#define modx (ptr_int_t)&mod_extensions
#define tre (ptr_int_t)&rep_extensions
#define tne (ptr_int_t)&repne_extensions
#define tfl (ptr_int_t)&float_low_modrm
#define tfh (ptr_int_t)&float_high_modrm
#define exop (ptr_int_t)&extra_operands
#define t64e (ptr_int_t)&x64_extensions
#define trexb (ptr_int_t)&rex_b_extensions
#define trexw (ptr_int_t)&rex_w_extensions
#define tvexw (ptr_int_t)&vex_W_extensions
#define txop (ptr_int_t)&xop_extensions
#define tevexw (ptr_int_t)&evex_W_extensions
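/* The final "code" field of each entry either ends an encoding chain (END_LIST)
 * or points at the next entry for the same opcode through one of the tables
 * above: e.g. the OP_add encodings link first_byte 05 -> 04 -> ... -> 00 ->
 * base_extensions[1][0] -> [2][0] -> [0][0], letting the encoder walk every
 * form of "add".
 */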
/****************************************************************************
* One-byte opcodes
* This is from Tables A-2 & A-3
*/
const instr_info_t first_byte[] = {
/* {op/type, op encoding, name, dst1, dst2, src1, src2, src3, modrm?, eflags, code} */
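/* Reading the first entry below as an example: opcode byte 00 is OP_add,
 * encoding 0x000000, dst1 = Eb (byte-sized modrm rm), srcs = Gb (modrm reg) and
 * Eb again, it has a modrm byte (mrm), writes the status flags (fW6), and
 * chains to base_extensions[1][0] for the next "add" encoding.
 */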
/* 00 */
{OP_add, 0x000000, "add", Eb, xx, Gb, Eb, xx, mrm, fW6, tex[1][0]},
{OP_add, 0x010000, "add", Ev, xx, Gv, Ev, xx, mrm, fW6, tfb[0x00]},
{OP_add, 0x020000, "add", Gb, xx, Eb, Gb, xx, mrm, fW6, tfb[0x01]},
{OP_add, 0x030000, "add", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x02]},
{OP_add, 0x040000, "add", al, xx, Ib, al, xx, no, fW6, tfb[0x03]},
{OP_add, 0x050000, "add", eAX, xx, Iz, eAX, xx, no, fW6, tfb[0x04]},
{OP_push, 0x060000, "push", xsp, i_xSPo1, es, xsp, xx, i64, x, tfb[0x0e]},
{OP_pop, 0x070000, "pop", es, xsp, xsp, i_xSP, xx, i64, x, tsb[0xa1]},
/* 08 */
{OP_or, 0x080000, "or", Eb, xx, Gb, Eb, xx, mrm, fW6, tex[1][1]},
{OP_or, 0x090000, "or", Ev, xx, Gv, Ev, xx, mrm, fW6, tfb[0x08]},
{OP_or, 0x0a0000, "or", Gb, xx, Eb, Gb, xx, mrm, fW6, tfb[0x09]},
{OP_or, 0x0b0000, "or", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x0a]},
{OP_or, 0x0c0000, "or", al, xx, Ib, al, xx, no, fW6, tfb[0x0b]},
{OP_or, 0x0d0000, "or", eAX, xx, Iz, eAX, xx, no, fW6, tfb[0x0c]},
{OP_push,0x0e0000, "push", xsp, i_xSPo1, cs, xsp, xx, i64, x, tfb[0x16]},
{ESCAPE, 0x0f0000, "(escape)", xx, xx, xx, xx, xx, no, x, NA},
/* 10 */
{OP_adc, 0x100000, "adc", Eb, xx, Gb, Eb, xx, mrm, (fW6|fRC), tex[1][2]},
{OP_adc, 0x110000, "adc", Ev, xx, Gv, Ev, xx, mrm, (fW6|fRC), tfb[0x10]},
{OP_adc, 0x120000, "adc", Gb, xx, Eb, Gb, xx, mrm, (fW6|fRC), tfb[0x11]},
{OP_adc, 0x130000, "adc", Gv, xx, Ev, Gv, xx, mrm, (fW6|fRC), tfb[0x12]},
{OP_adc, 0x140000, "adc", al, xx, Ib, al, xx, no, (fW6|fRC), tfb[0x13]},
{OP_adc, 0x150000, "adc", eAX, xx, Iz, eAX, xx, no, (fW6|fRC), tfb[0x14]},
{OP_push, 0x160000, "push", xsp, i_xSPo1, ss, xsp, xx, i64, x, tfb[0x1e]},
{OP_pop, 0x170000, "pop", ss, xsp, xsp, i_xSP, xx, i64, x, tfb[0x1f]},
/* 18 */
{OP_sbb, 0x180000, "sbb", Eb, xx, Gb, Eb, xx, mrm, (fW6|fRC), tex[1][3]},
{OP_sbb, 0x190000, "sbb", Ev, xx, Gv, Ev, xx, mrm, (fW6|fRC), tfb[0x18]},
{OP_sbb, 0x1a0000, "sbb", Gb, xx, Eb, Gb, xx, mrm, (fW6|fRC), tfb[0x19]},
{OP_sbb, 0x1b0000, "sbb", Gv, xx, Ev, Gv, xx, mrm, (fW6|fRC), tfb[0x1a]},
{OP_sbb, 0x1c0000, "sbb", al, xx, Ib, al, xx, no, (fW6|fRC), tfb[0x1b]},
{OP_sbb, 0x1d0000, "sbb", eAX, xx, Iz, eAX, xx, no, (fW6|fRC), tfb[0x1c]},
{OP_push, 0x1e0000, "push", xsp, i_xSPo1, ds, xsp, xx, i64, x, tsb[0xa0]},
{OP_pop, 0x1f0000, "pop", ds, xsp, xsp, i_xSP, xx, i64, x, tfb[0x07]},
/* 20 */
{OP_and, 0x200000, "and", Eb, xx, Gb, Eb, xx, mrm, fW6, tex[1][4]},
{OP_and, 0x210000, "and", Ev, xx, Gv, Ev, xx, mrm, fW6, tfb[0x20]},
{OP_and, 0x220000, "and", Gb, xx, Eb, Gb, xx, mrm, fW6, tfb[0x21]},
{OP_and, 0x230000, "and", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x22]},
{OP_and, 0x240000, "and", al, xx, Ib, al, xx, no, fW6, tfb[0x23]},
{OP_and, 0x250000, "and", eAX, xx, Iz, eAX, xx, no, fW6, tfb[0x24]},
{PREFIX, 0x260000, "es", xx, xx, xx, xx, xx, no, x, SEG_ES},
{OP_daa, 0x270000, "daa", al, xx, al, xx, xx, i64, (fW6|fRC|fRA), END_LIST},
/* 28 */
{OP_sub, 0x280000, "sub", Eb, xx, Gb, Eb, xx, mrm, fW6, tex[1][5]},
{OP_sub, 0x290000, "sub", Ev, xx, Gv, Ev, xx, mrm, fW6, tfb[0x28]},
{OP_sub, 0x2a0000, "sub", Gb, xx, Eb, Gb, xx, mrm, fW6, tfb[0x29]},
{OP_sub, 0x2b0000, "sub", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x2a]},
{OP_sub, 0x2c0000, "sub", al, xx, Ib, al, xx, no, fW6, tfb[0x2b]},
{OP_sub, 0x2d0000, "sub", eAX, xx, Iz, eAX, xx, no, fW6, tfb[0x2c]},
{PREFIX, 0x2e0000, "cs", xx, xx, xx, xx, xx, no, x, SEG_CS},
{OP_das, 0x2f0000, "das", al, xx, al, xx, xx, i64, (fW6|fRC|fRA), END_LIST},
/* 30 */
{OP_xor, 0x300000, "xor", Eb, xx, Gb, Eb, xx, mrm, fW6, tex[1][6]},
{OP_xor, 0x310000, "xor", Ev, xx, Gv, Ev, xx, mrm, fW6, tfb[0x30]},
{OP_xor, 0x320000, "xor", Gb, xx, Eb, Gb, xx, mrm, fW6, tfb[0x31]},
{OP_xor, 0x330000, "xor", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x32]},
{OP_xor, 0x340000, "xor", al, xx, Ib, al, xx, no, fW6, tfb[0x33]},
{OP_xor, 0x350000, "xor", eAX, xx, Iz, eAX, xx, no, fW6, tfb[0x34]},
{PREFIX, 0x360000, "ss", xx, xx, xx, xx, xx, no, x, SEG_SS},
{OP_aaa, 0x370000, "aaa", ax, xx, ax, xx, xx, i64, (fW6|fRA), END_LIST},
/* 38 */
{OP_cmp, 0x380000, "cmp", xx, xx, Eb, Gb, xx, mrm, fW6, tex[1][7]},
{OP_cmp, 0x390000, "cmp", xx, xx, Ev, Gv, xx, mrm, fW6, tfb[0x38]},
{OP_cmp, 0x3a0000, "cmp", xx, xx, Gb, Eb, xx, mrm, fW6, tfb[0x39]},
{OP_cmp, 0x3b0000, "cmp", xx, xx, Gv, Ev, xx, mrm, fW6, tfb[0x3a]},
{OP_cmp, 0x3c0000, "cmp", xx, xx, al, Ib, xx, no, fW6, tfb[0x3b]},
{OP_cmp, 0x3d0000, "cmp", xx, xx, eAX, Iz, xx, no, fW6, tfb[0x3c]},
{PREFIX, 0x3e0000, "ds", xx, xx, xx, xx, xx, no, x, SEG_DS},
{OP_aas, 0x3f0000, "aas", ax, xx, ax, xx, xx, i64, (fW6|fRA), END_LIST},
/* 40 */
{X64_EXT, 0x400000, "(x64_ext 0)", xx, xx, xx, xx, xx, no, x, 0},
{X64_EXT, 0x410000, "(x64_ext 1)", xx, xx, xx, xx, xx, no, x, 1},
{X64_EXT, 0x420000, "(x64_ext 2)", xx, xx, xx, xx, xx, no, x, 2},
{X64_EXT, 0x430000, "(x64_ext 3)", xx, xx, xx, xx, xx, no, x, 3},
{X64_EXT, 0x440000, "(x64_ext 4)", xx, xx, xx, xx, xx, no, x, 4},
{X64_EXT, 0x450000, "(x64_ext 5)", xx, xx, xx, xx, xx, no, x, 5},
{X64_EXT, 0x460000, "(x64_ext 6)", xx, xx, xx, xx, xx, no, x, 6},
{X64_EXT, 0x470000, "(x64_ext 7)", xx, xx, xx, xx, xx, no, x, 7},
/* 48 */
{X64_EXT, 0x480000, "(x64_ext 8)", xx, xx, xx, xx, xx, no, x, 8},
{X64_EXT, 0x490000, "(x64_ext 9)", xx, xx, xx, xx, xx, no, x, 9},
{X64_EXT, 0x4a0000, "(x64_ext 10)", xx, xx, xx, xx, xx, no, x, 10},
{X64_EXT, 0x4b0000, "(x64_ext 11)", xx, xx, xx, xx, xx, no, x, 11},
{X64_EXT, 0x4c0000, "(x64_ext 12)", xx, xx, xx, xx, xx, no, x, 12},
{X64_EXT, 0x4d0000, "(x64_ext 13)", xx, xx, xx, xx, xx, no, x, 13},
{X64_EXT, 0x4e0000, "(x64_ext 14)", xx, xx, xx, xx, xx, no, x, 14},
{X64_EXT, 0x4f0000, "(x64_ext 15)", xx, xx, xx, xx, xx, no, x, 15},
/* 50 */
{OP_push, 0x500000, "push", xsp, i_xSPo1, xAX_x, xsp, xx, no, x, tfb[0x51]},
{OP_push, 0x510000, "push", xsp, i_xSPo1, xCX_x, xsp, xx, no, x, tfb[0x52]},
{OP_push, 0x520000, "push", xsp, i_xSPo1, xDX_x, xsp, xx, no, x, tfb[0x53]},
{OP_push, 0x530000, "push", xsp, i_xSPo1, xBX_x, xsp, xx, no, x, tfb[0x54]},
{OP_push, 0x540000, "push", xsp, i_xSPo1, xSP_x, xsp, xx, no, x, tfb[0x55]},
{OP_push, 0x550000, "push", xsp, i_xSPo1, xBP_x, xsp, xx, no, x, tfb[0x56]},
{OP_push, 0x560000, "push", xsp, i_xSPo1, xSI_x, xsp, xx, no, x, tfb[0x57]},
{OP_push, 0x570000, "push", xsp, i_xSPo1, xDI_x, xsp, xx, no, x, tex[12][6]},
/* 58 */
{OP_pop, 0x580000, "pop", xAX_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x59]},
{OP_pop, 0x590000, "pop", xCX_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5a]},
{OP_pop, 0x5a0000, "pop", xDX_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5b]},
{OP_pop, 0x5b0000, "pop", xBX_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5c]},
{OP_pop, 0x5c0000, "pop", xSP_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5d]},
{OP_pop, 0x5d0000, "pop", xBP_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5e]},
{OP_pop, 0x5e0000, "pop", xSI_x, xsp, xsp, i_xSP, xx, no, x, tfb[0x5f]},
{OP_pop, 0x5f0000, "pop", xDI_x, xsp, xsp, i_xSP, xx, no, x, tex[26][0]},
/* 60 */
{OP_pusha, 0x600000, "pusha", xsp, i_xSPo8, xsp, eAX, eBX, xop|i64, x, exop[0x00]},
{OP_popa, 0x610000, "popa", xsp, eAX, xsp, i_xSPs8, xx, xop|i64, x, exop[0x02]},
{EVEX_PREFIX_EXT, 0x620000, "(evex_prefix_ext)", xx, xx, xx, xx, xx, no, x, END_LIST},
{X64_EXT, 0x630000, "(x64_ext 16)", xx, xx, xx, xx, xx, no, x, 16},
{PREFIX, 0x640000, "fs", xx, xx, xx, xx, xx, no, x, SEG_FS},
{PREFIX, 0x650000, "gs", xx, xx, xx, xx, xx, no, x, SEG_GS},
{PREFIX, 0x660000, "data size", xx, xx, xx, xx, xx, no, x, PREFIX_DATA},
{PREFIX, 0x670000, "addr size", xx, xx, xx, xx, xx, no, x, PREFIX_ADDR},
/* 68 */
{OP_push_imm, 0x680000, "push", xsp, i_xSPo1, Iz, xsp, xx, no, x, tfb[0x6a]},
{OP_imul, 0x690000, "imul", Gv, xx, Ev, Iz, xx, mrm, fW6, tfb[0x6b]},
{OP_push_imm, 0x6a0000, "push", xsp, i_xSPo1, Ib, xsp, xx, no, x, END_LIST},/* sign-extend to push 2/4/8 bytes */
{OP_imul, 0x6b0000, "imul", Gv, xx, Ev, Ib, xx, mrm, fW6, END_LIST},
{REP_EXT, 0x6c0000, "((rep) ins)", Yb, xx, i_dx, xx, xx, no, fRD, 0},
{REP_EXT, 0x6d0000, "((rep) ins)", Yz, xx, i_dx, xx, xx, no, fRD, 1},
{REP_EXT, 0x6e0000, "((rep) outs)", i_dx, xx, Xb, xx, xx, no, fRD, 2},
{REP_EXT, 0x6f0000, "((rep) outs)", i_dx, xx, Xz, xx, xx, no, fRD, 3},
/* 70 */
{OP_jo_short, 0x700000, "jo", xx, xx, Jb, xx, xx, no, fRO, END_LIST},
{OP_jno_short, 0x710000, "jno", xx, xx, Jb, xx, xx, no, fRO, END_LIST},
{OP_jb_short, 0x720000, "jb", xx, xx, Jb, xx, xx, no, fRC, END_LIST},
{OP_jnb_short, 0x730000, "jnb", xx, xx, Jb, xx, xx, no, fRC, END_LIST},
{OP_jz_short, 0x740000, "jz", xx, xx, Jb, xx, xx, no, fRZ, END_LIST},
{OP_jnz_short, 0x750000, "jnz", xx, xx, Jb, xx, xx, no, fRZ, END_LIST},
{OP_jbe_short, 0x760000, "jbe", xx, xx, Jb, xx, xx, no, (fRC|fRZ), END_LIST},
{OP_jnbe_short,0x770000, "jnbe",xx, xx, Jb, xx, xx, no, (fRC|fRZ), END_LIST},
/* 78 */
{OP_js_short, 0x780000, "js", xx, xx, Jb, xx, xx, no, fRS, END_LIST},
{OP_jns_short, 0x790000, "jns", xx, xx, Jb, xx, xx, no, fRS, END_LIST},
{OP_jp_short, 0x7a0000, "jp", xx, xx, Jb, xx, xx, no, fRP, END_LIST},
{OP_jnp_short, 0x7b0000, "jnp", xx, xx, Jb, xx, xx, no, fRP, END_LIST},
{OP_jl_short, 0x7c0000, "jl", xx, xx, Jb, xx, xx, no, (fRS|fRO), END_LIST},
{OP_jnl_short, 0x7d0000, "jnl", xx, xx, Jb, xx, xx, no, (fRS|fRO), END_LIST},
{OP_jle_short, 0x7e0000, "jle", xx, xx, Jb, xx, xx, no, (fRS|fRO|fRZ), END_LIST},
{OP_jnle_short,0x7f0000, "jnle",xx, xx, Jb, xx, xx, no, (fRS|fRO|fRZ), END_LIST},
/* 80 */
{EXTENSION, 0x800000, "(group 1a)", Eb, xx, Ib, xx, xx, mrm, x, 0},
{EXTENSION, 0x810000, "(group 1b)", Ev, xx, Iz, xx, xx, mrm, x, 1},
{EXTENSION, 0x820000, "(group 1c*)", Ev, xx, Ib, xx, xx, mrm|i64, x, 25}, /* PR 235092: gnu tools (gdb, objdump) think this is a bad opcode but windbg and the hardware disagree */
{EXTENSION, 0x830000, "(group 1c)", Ev, xx, Ib, xx, xx, mrm, x, 2},
{OP_test, 0x840000, "test", xx, xx, Eb, Gb, xx, mrm, fW6, tex[10][0]},
{OP_test, 0x850000, "test", xx, xx, Ev, Gv, xx, mrm, fW6, tfb[0x84]},
{OP_xchg, 0x860000, "xchg", Eb, Gb, Eb, Gb, xx, mrm, x, END_LIST},
{OP_xchg, 0x870000, "xchg", Ev, Gv, Ev, Gv, xx, mrm, x, tfb[0x86]},
/* 88 */
{OP_mov_st, 0x880000, "mov", Eb, xx, Gb, xx, xx, mrm, x, tex[18][0]},
{OP_mov_st, 0x890000, "mov", Ev, xx, Gv, xx, xx, mrm, x, tfb[0x88]},
{OP_mov_ld, 0x8a0000, "mov", Gb, xx, Eb, xx, xx, mrm, x, END_LIST},
{OP_mov_ld, 0x8b0000, "mov", Gv, xx, Ev, xx, xx, mrm, x, tfb[0x8a]},
{OP_mov_seg, 0x8c0000, "mov", Ev, xx, Sw, xx, xx, mrm, x, END_LIST},
{OP_lea, 0x8d0000, "lea", Gv, xx, Mm, xx, xx, mrm, x, END_LIST}, /* Intel has just M */
{OP_mov_seg, 0x8e0000, "mov", Sw, xx, Ev, xx, xx, mrm, x, tfb[0x8c]},
{XOP_PREFIX_EXT, 0x8f0000, "(xop_prefix_ext 0)", xx, xx, xx, xx, xx, no, x, 0},
/* 90 */
{PREFIX_EXT, 0x900000, "(prefix ext 103)", xx, xx, xx, xx, xx, no, x, 103},
{OP_xchg, 0x910000, "xchg", eCX_x, eAX, eCX_x, eAX, xx, no, x, tfb[0x92]},
{OP_xchg, 0x920000, "xchg", eDX_x, eAX, eDX_x, eAX, xx, no, x, tfb[0x93]},
{OP_xchg, 0x930000, "xchg", eBX_x, eAX, eBX_x, eAX, xx, no, x, tfb[0x94]},
{OP_xchg, 0x940000, "xchg", eSP_x, eAX, eSP_x, eAX, xx, no, x, tfb[0x95]},
{OP_xchg, 0x950000, "xchg", eBP_x, eAX, eBP_x, eAX, xx, no, x, tfb[0x96]},
{OP_xchg, 0x960000, "xchg", eSI_x, eAX, eSI_x, eAX, xx, no, x, tfb[0x97]},
{OP_xchg, 0x970000, "xchg", eDI_x, eAX, eDI_x, eAX, xx, no, x, tfb[0x87]},
/* 98 */
{OP_cwde, 0x980000, "cwde", eAX, xx, ax, xx, xx, no, x, END_LIST},/*16-bit=="cbw", src is al not ax; FIXME: newer gdb calls it "cwtl"?!?*/
/* PR 354096: does not write to ax/eax/rax: sign-extends into dx/edx/rdx */
{OP_cdq, 0x990000, "cdq", eDX, xx, eAX, xx, xx, no, x, END_LIST},/*16-bit=="cwd";64-bit=="cqo"*/
{OP_call_far, 0x9a0000, "lcall", xsp, i_vSPo2, Ap, xsp, xx, i64, x, END_LIST},
{OP_fwait, 0x9b0000, "fwait", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pushf, 0x9c0000, "pushf", xsp, i_xSPo1, xsp, xx, xx, no, fRX, END_LIST},
{OP_popf, 0x9d0000, "popf", xsp, xx, xsp, i_xSP, xx, no, fWX, END_LIST},
{OP_sahf, 0x9e0000, "sahf", xx, xx, ah, xx, xx, no, (fW6&(~fWO)), END_LIST},
{OP_lahf, 0x9f0000, "lahf", ah, xx, xx, xx, xx, no, (fR6&(~fRO)), END_LIST},
/* a0 */
{OP_mov_ld, 0xa00000, "mov", al, xx, Ob, xx, xx, no, x, tfb[0x8b]},
{OP_mov_ld, 0xa10000, "mov", eAX, xx, Ov, xx, xx, no, x, tfb[0xa0]},
{OP_mov_st, 0xa20000, "mov", Ob, xx, al, xx, xx, no, x, tfb[0x89]},
{OP_mov_st, 0xa30000, "mov", Ov, xx, eAX, xx, xx, no, x, tfb[0xa2]},
{REP_EXT, 0xa40000, "((rep) movs)", Yb, xx, Xb, xx, xx, no, fRD, 4},
{REP_EXT, 0xa50000, "((rep) movs)", Yv, xx, Xv, xx, xx, no, fRD, 5},
{REPNE_EXT, 0xa60000, "((rep/ne) cmps)", Xb, xx, Yb, xx, xx, no, (fW6|fRD|fRZ), 0},
{REPNE_EXT, 0xa70000, "((rep/ne) cmps)", Xv, xx, Yv, xx, xx, no, (fW6|fRD|fRZ), 1},
/* a8 */
{OP_test, 0xa80000, "test", xx, xx, al, Ib, xx, no, fW6, tfb[0x85]},
{OP_test, 0xa90000, "test", xx, xx, eAX, Iz, xx, no, fW6, tfb[0xa8]},
{REP_EXT, 0xaa0000, "((rep) stos)", Yb, xx, al, xx, xx, no, fRD, 6},
{REP_EXT, 0xab0000, "((rep) stos)", Yv, xx, eAX, xx, xx, no, fRD, 7},
{REP_EXT, 0xac0000, "((rep) lods)", al, xx, Xb, xx, xx, no, fRD, 8},
{REP_EXT, 0xad0000, "((rep) lods)", eAX, xx, Xv, xx, xx, no, fRD, 9},
{REPNE_EXT, 0xae0000, "((rep/ne) scas)", al, xx, Yb, xx, xx, no, (fW6|fRD|fRZ), 2},
{REPNE_EXT, 0xaf0000, "((rep/ne) scas)", eAX, xx, Yv, xx, xx, no, (fW6|fRD|fRZ), 3},
/* b0 */
{OP_mov_imm, 0xb00000, "mov", al_x, xx, Ib, xx, xx, no, x, tfb[0xb1]},
{OP_mov_imm, 0xb10000, "mov", cl_x, xx, Ib, xx, xx, no, x, tfb[0xb2]},
{OP_mov_imm, 0xb20000, "mov", dl_x, xx, Ib, xx, xx, no, x, tfb[0xb3]},
{OP_mov_imm, 0xb30000, "mov", bl_x, xx, Ib, xx, xx, no, x, tfb[0xb4]},
{OP_mov_imm, 0xb40000, "mov", ah_x, xx, Ib, xx, xx, no, x, tfb[0xb5]},
{OP_mov_imm, 0xb50000, "mov", ch_x, xx, Ib, xx, xx, no, x, tfb[0xb6]},
{OP_mov_imm, 0xb60000, "mov", dh_x, xx, Ib, xx, xx, no, x, tfb[0xb7]},
/* PR 250397: we point at the tail end of the mov_st templates */
{OP_mov_imm, 0xb70000, "mov", bh_x, xx, Ib, xx, xx, no, x, tex[18][0]},
/* b8 */
{OP_mov_imm, 0xb80000, "mov", eAX_x, xx, Iv, xx, xx, no, x, tfb[0xb9]},
{OP_mov_imm, 0xb90000, "mov", eCX_x, xx, Iv, xx, xx, no, x, tfb[0xba]},
{OP_mov_imm, 0xba0000, "mov", eDX_x, xx, Iv, xx, xx, no, x, tfb[0xbb]},
{OP_mov_imm, 0xbb0000, "mov", eBX_x, xx, Iv, xx, xx, no, x, tfb[0xbc]},
{OP_mov_imm, 0xbc0000, "mov", eSP_x, xx, Iv, xx, xx, no, x, tfb[0xbd]},
{OP_mov_imm, 0xbd0000, "mov", eBP_x, xx, Iv, xx, xx, no, x, tfb[0xbe]},
{OP_mov_imm, 0xbe0000, "mov", eSI_x, xx, Iv, xx, xx, no, x, tfb[0xbf]},
{OP_mov_imm, 0xbf0000, "mov", eDI_x, xx, Iv, xx, xx, no, x, tfb[0xb0]},
/* c0 */
{EXTENSION, 0xc00000, "(group 2a)", Eb, xx, Ib, xx, xx, mrm, x, 3},
{EXTENSION, 0xc10000, "(group 2b)", Ev, xx, Ib, xx, xx, mrm, x, 4},
{OP_ret, 0xc20000, "ret", xsp, xx, Iw, xsp, i_iSP, no, x, tfb[0xc3]},
{OP_ret, 0xc30000, "ret", xsp, xx, xsp, i_iSP, xx, no, x, END_LIST},
{VEX_PREFIX_EXT, 0xc40000, "(vex_prefix_ext 0)", xx, xx, xx, xx, xx, no, x, 0},
{VEX_PREFIX_EXT, 0xc50000, "(vex_prefix_ext 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXTENSION, 0xc60000, "(group 11a)", Eb, xx, Ib, xx, xx, mrm, x, 17},
{EXTENSION, 0xc70000, "(group 11b)", Ev, xx, Iz, xx, xx, mrm, x, 18},
/* c8 */
{OP_enter, 0xc80000, "enter", xsp, i_xSPoN, Iw, Ib, xsp, xop, x, exop[0x05]},
{OP_leave, 0xc90000, "leave", xsp, xbp, xbp, xsp, i_xBP, no, x, END_LIST},
{OP_ret_far, 0xca0000, "lret", xsp, xx, Iw, xsp, i_vSPs2, no, x, tfb[0xcb]},
{OP_ret_far, 0xcb0000, "lret", xsp, xx, xsp, i_vSPs2, xx, no, x, END_LIST},
/* we ignore the operations on the kernel stack */
{OP_int3, 0xcc0000, "int3", xx, xx, xx, xx, xx, no, fINT, END_LIST},
{OP_int, 0xcd0000, "int", xx, xx, Ib, xx, xx, no, fINT, END_LIST},
{OP_into, 0xce0000, "into", xx, xx, xx, xx, xx, i64, fINT, END_LIST},
{OP_iret, 0xcf0000, "iret", xsp, xx, xsp, i_vSPs3, xx, no, fWX, END_LIST},
/* d0 */
{EXTENSION, 0xd00000, "(group 2c)", Eb, xx, c1, xx, xx, mrm, x, 5},
{EXTENSION, 0xd10000, "(group 2d)", Ev, xx, c1, xx, xx, mrm, x, 6},
{EXTENSION, 0xd20000, "(group 2e)", Eb, xx, cl, xx, xx, mrm, x, 7},
{EXTENSION, 0xd30000, "(group 2f)", Ev, xx, cl, xx, xx, mrm, x, 8},
{OP_aam, 0xd40000, "aam", ax, xx, Ib, ax, xx, i64, fW6, END_LIST},
{OP_aad, 0xd50000, "aad", ax, xx, Ib, ax, xx, i64, fW6, END_LIST},
{OP_salc, 0xd60000, "salc", al, xx, xx, xx, xx, i64, fRC, END_LIST},/*undocumented*/
{OP_xlat, 0xd70000, "xlat", al, xx, Zb, xx, xx, no, x, END_LIST},
/* d8 */
{FLOAT_EXT, 0xd80000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},/* all floats need modrm */
{FLOAT_EXT, 0xd90000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xda0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xdb0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xdc0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xdd0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xde0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
{FLOAT_EXT, 0xdf0000, "(float)", xx, xx, xx, xx, xx, mrm, x, NA},
/* e0 */
{OP_loopne,0xe00000, "loopne", axCX, xx, Jb, axCX, xx, no, fRZ, END_LIST},
{OP_loope, 0xe10000, "loope", axCX, xx, Jb, axCX, xx, no, fRZ, END_LIST},
{OP_loop, 0xe20000, "loop", axCX, xx, Jb, axCX, xx, no, x, END_LIST},
{OP_jecxz, 0xe30000, "jecxz", xx, xx, Jb, axCX, xx, no, x, END_LIST},/*16-bit=="jcxz",64-bit="jrcxz"*/
/* FIXME: in & out access "I/O ports", are these memory addresses?
* if so, change Ib to Ob and change dx to i_dx (move to dest for out)
*/
{OP_in, 0xe40000, "in", al, xx, Ib, xx, xx, no, x, tfb[0xed]},
{OP_in, 0xe50000, "in", zAX, xx, Ib, xx, xx, no, x, tfb[0xe4]},
{OP_out, 0xe60000, "out", xx, xx, Ib, al, xx, no, x, tfb[0xef]},
{OP_out, 0xe70000, "out", xx, xx, Ib, zAX, xx, no, x, tfb[0xe6]},
/* e8 */
{OP_call, 0xe80000, "call", xsp, i_iSPo1, Jz, xsp, xx, no, x, END_LIST},
{OP_jmp, 0xe90000, "jmp", xx, xx, Jz, xx, xx, no, x, END_LIST},
{OP_jmp_far, 0xea0000, "ljmp", xx, xx, Ap, xx, xx, i64, x, END_LIST},
{OP_jmp_short, 0xeb0000, "jmp", xx, xx, Jb, xx, xx, no, x, END_LIST},
{OP_in, 0xec0000, "in", al, xx, dx, xx, xx, no, x, END_LIST},
{OP_in, 0xed0000, "in", zAX, xx, dx, xx, xx, no, x, tfb[0xec]},
{OP_out, 0xee0000, "out", xx, xx, al, dx, xx, no, x, END_LIST},
{OP_out, 0xef0000, "out", xx, xx, zAX, dx, xx, no, x, tfb[0xee]},
/* f0 */
{PREFIX, 0xf00000, "lock", xx, xx, xx, xx, xx, no, x, PREFIX_LOCK},
/* Also called OP_icebp. Undocumented. I'm assuming it looks like OP_int* */
{OP_int1, 0xf10000, "int1", xx, xx, xx, xx, xx, no, fINT, END_LIST},
{PREFIX, 0xf20000, "repne", xx, xx, xx, xx, xx, no, x, PREFIX_REPNE},
{PREFIX, 0xf30000, "rep", xx, xx, xx, xx, xx, no, x, PREFIX_REP},
{OP_hlt, 0xf40000, "hlt", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_cmc, 0xf50000, "cmc", xx, xx, xx, xx, xx, no, fWC, END_LIST},
{EXTENSION, 0xf60000, "(group 3a)", Eb, xx, xx, xx, xx, mrm, x, 9},
{EXTENSION, 0xf70000, "(group 3b)", Ev, xx, xx, xx, xx, mrm, x, 10},
/* f8 */
{OP_clc, 0xf80000, "clc", xx, xx, xx, xx, xx, no, fWC, END_LIST},
{OP_stc, 0xf90000, "stc", xx, xx, xx, xx, xx, no, fWC, END_LIST},
{OP_cli, 0xfa0000, "cli", xx, xx, xx, xx, xx, no, fWI, END_LIST},
{OP_sti, 0xfb0000, "sti", xx, xx, xx, xx, xx, no, fWI, END_LIST},
{OP_cld, 0xfc0000, "cld", xx, xx, xx, xx, xx, no, fWD, END_LIST},
{OP_std, 0xfd0000, "std", xx, xx, xx, xx, xx, no, fWD, END_LIST},
{EXTENSION, 0xfe0000, "(group 4)", xx, xx, xx, xx, xx, mrm, x, 11},
{EXTENSION, 0xff0000, "(group 5)", xx, xx, xx, xx, xx, mrm, x, 12},
};
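/* Minimal decode sketch (hypothetical helper, not part of this table file)
 * showing how these tables are meant to be indexed: the first opcode byte
 * selects a first_byte[] entry, and an ESCAPE entry forwards to the two-byte
 * table below:
 *
 *   const instr_info_t *info = &first_byte[*pc];
 *   if (info->type == ESCAPE)            // 0x0f escape byte
 *       info = &second_byte[*(pc + 1)];  // Tables A-4 & A-5
 *
 * The field name "type" is assumed here; EXTENSION and PREFIX_EXT entries
 * likewise forward into base_extensions[] and prefix_extensions[] using the
 * code field.
 */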
/****************************************************************************
* Two-byte opcodes
* This is from Tables A-4 & A-5
*/
const instr_info_t second_byte[] = {
/* 00 */
{EXTENSION, 0x0f0010, "(group 6)", xx, xx, xx, xx, xx, mrm, x, 13},
{EXTENSION, 0x0f0110, "(group 7)", xx, xx, xx, xx, xx, mrm, x, 14},
{OP_lar, 0x0f0210, "lar", Gv, xx, Ew, xx, xx, mrm, fWZ, END_LIST},
{OP_lsl, 0x0f0310, "lsl", Gv, xx, Ew, xx, xx, mrm, fWZ, END_LIST},
{INVALID, 0x0f0410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX: writes ss and cs */
{OP_syscall, 0x0f0510, "syscall", xcx, xx, xx, xx, xx, no, x, NA}, /* AMD/x64 only */
{OP_clts, 0x0f0610, "clts", xx, xx, xx, xx, xx, no, x, END_LIST},
/* XXX: writes ss and cs */
{OP_sysret, 0x0f0710, "sysret", xx, xx, xx, xx, xx, no, x, NA}, /* AMD/x64 only */
/* 08 */
{OP_invd, 0x0f0810, "invd", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_wbinvd, 0x0f0910, "wbinvd", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f0a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_ud2a, 0x0f0b10, "ud2a", xx, xx, xx, xx, xx, no, x, END_LIST}, /* "undefined instr" instr */
{INVALID, 0x0f0c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EXTENSION, 0x0f0d10, "(group amd)", xx, xx, xx, xx, xx, mrm, x, 24}, /* AMD only */
{OP_femms, 0x0f0e10, "femms", xx, xx, xx, xx, xx, no, x, END_LIST},
{SUFFIX_EXT, 0x0f0f10, "(group 3DNow!)", xx, xx, xx, xx, xx, mrm, x, 0},
/* 10 */
{PREFIX_EXT, 0x0f1010, "(prefix ext 0)", xx, xx, xx, xx, xx, mrm, x, 0},
{PREFIX_EXT, 0x0f1110, "(prefix ext 1)", xx, xx, xx, xx, xx, mrm, x, 1},
{PREFIX_EXT, 0x0f1210, "(prefix ext 2)", xx, xx, xx, xx, xx, mrm, x, 2},
{PREFIX_EXT, 0x0f1310, "(prefix ext 3)", xx, xx, xx, xx, xx, mrm, x, 3},
{PREFIX_EXT, 0x0f1410, "(prefix ext 4)", xx, xx, xx, xx, xx, mrm, x, 4},
{PREFIX_EXT, 0x0f1510, "(prefix ext 5)", xx, xx, xx, xx, xx, mrm, x, 5},
{PREFIX_EXT, 0x0f1610, "(prefix ext 6)", xx, xx, xx, xx, xx, mrm, x, 6},
{PREFIX_EXT, 0x0f1710, "(prefix ext 7)", xx, xx, xx, xx, xx, mrm, x, 7},
/* 18 */
{EXTENSION, 0x0f1810, "(group 16)", xx, xx, xx, xx, xx, mrm, x, 23},
/* xref case 9862/PR 214297 : 0f19-0f1e are "HINT_NOP": valid on P6+.
* we treat them the same as 0f1f but do not put on encoding chain.
* The operand is ignored but to support encoding it we must list it.
 * i#453: analysis routines now special-case nop_modrm to ignore the src opnd */
{OP_nop_modrm, 0x0f1910, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1a10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1b10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1c10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1d10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1e10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1f10, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
/* 20 */
{OP_mov_priv, 0x0f2010, "mov", Rr, xx, Cr, xx, xx, mrm, fW6, tsb[0x21]},
{OP_mov_priv, 0x0f2110, "mov", Rr, xx, Dr, xx, xx, mrm, fW6, tsb[0x22]},
{OP_mov_priv, 0x0f2210, "mov", Cr, xx, Rr, xx, xx, mrm, fW6, tsb[0x23]},
{OP_mov_priv, 0x0f2310, "mov", Dr, xx, Rr, xx, xx, mrm, fW6, END_LIST},
{INVALID, 0x0f2410, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* FIXME: gdb thinks ok! */
{INVALID, 0x0f2510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f2610, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* FIXME: gdb thinks ok! */
{INVALID, 0x0f2710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 28 */
{PREFIX_EXT, 0x0f2810, "(prefix ext 8)", xx, xx, xx, xx, xx, mrm, x, 8},
{PREFIX_EXT, 0x0f2910, "(prefix ext 9)", xx, xx, xx, xx, xx, mrm, x, 9},
{PREFIX_EXT, 0x0f2a10, "(prefix ext 10)", xx, xx, xx, xx, xx, mrm, x, 10},
{PREFIX_EXT, 0x0f2b10, "(prefix ext 11)", xx, xx, xx, xx, xx, mrm, x, 11},
{PREFIX_EXT, 0x0f2c10, "(prefix ext 12)", xx, xx, xx, xx, xx, mrm, x, 12},
{PREFIX_EXT, 0x0f2d10, "(prefix ext 13)", xx, xx, xx, xx, xx, mrm, x, 13},
{PREFIX_EXT, 0x0f2e10, "(prefix ext 14)", xx, xx, xx, xx, xx, mrm, x, 14},
{PREFIX_EXT, 0x0f2f10, "(prefix ext 15)", xx, xx, xx, xx, xx, mrm, x, 15},
/* 30 */
{OP_wrmsr, 0x0f3010, "wrmsr", xx, xx, edx, eax, ecx, no, x, END_LIST},
{OP_rdtsc, 0x0f3110, "rdtsc", edx, eax, xx, xx, xx, no, x, END_LIST},
{OP_rdmsr, 0x0f3210, "rdmsr", edx, eax, ecx, xx, xx, no, x, END_LIST},
{OP_rdpmc, 0x0f3310, "rdpmc", edx, eax, ecx, xx, xx, no, x, END_LIST},
/* XXX: sysenter writes cs and ss */
{OP_sysenter, 0x0f3410, "sysenter", xsp, xx, xx, xx, xx, no, x, END_LIST},
/* XXX: sysexit writes cs and ss */
{OP_sysexit, 0x0f3510, "sysexit", xsp, xx, xcx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f3610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#1313: various getsec leaf funcs at CPL 0 write to all kinds of
* processor state including eflags and eip. Leaf funcs are indicated by eax
* value, though. Here we only model the CPL > 0 effects, which conditionally
* write to ebx + ecx.
*/
{OP_getsec, 0x0f3710, "getsec", eax, ebx, eax, ebx, xx, xop|predcx, x, exop[13]},
/* 38 */
{ESCAPE_3BYTE_38, 0x0f3810, "(3byte 38)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{ESCAPE_3BYTE_3a, 0x0f3a10, "(3byte 3a)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f3f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 40 */
{OP_cmovo, 0x0f4010, "cmovo", Gv, xx, Ev, xx, xx, mrm|predcc, fRO, END_LIST},
{E_VEX_EXT, 0x0f4110, "(e_vex ext 83)", xx, xx, xx, xx, xx, mrm, x, 83},
{E_VEX_EXT, 0x0f4210, "(e_vex ext 84)", xx, xx, xx, xx, xx, mrm, x, 84},
{OP_cmovnb, 0x0f4310, "cmovnb", Gv, xx, Ev, xx, xx, mrm|predcc, fRC, END_LIST},
{E_VEX_EXT, 0x0f4410, "(e_vex ext 86)", xx, xx, xx, xx, xx, mrm, x, 86},
{E_VEX_EXT, 0x0f4510, "(e_vex ext 87)", xx, xx, xx, xx, xx, mrm, x, 87},
{E_VEX_EXT, 0x0f4610, "(e_vex ext 88)", xx, xx, xx, xx, xx, mrm, x, 88},
{E_VEX_EXT, 0x0f4710, "(e_vex ext 89)", xx, xx, xx, xx, xx, mrm, x, 89},
/* 48 */
{OP_cmovs, 0x0f4810, "cmovs", Gv, xx, Ev, xx, xx, mrm|predcc, fRS, END_LIST},
{OP_cmovns, 0x0f4910, "cmovns", Gv, xx, Ev, xx, xx, mrm|predcc, fRS, END_LIST},
{E_VEX_EXT, 0x0f4a10, "(e_vex ext 90)", xx, xx, xx, xx, xx, mrm, x, 90},
{E_VEX_EXT, 0x0f4b10, "(e_vex ext 85)", xx, xx, xx, xx, xx, mrm, x, 85},
{OP_cmovl, 0x0f4c10, "cmovl", Gv, xx, Ev, xx, xx, mrm|predcc, (fRS|fRO), END_LIST},
{OP_cmovnl, 0x0f4d10, "cmovnl", Gv, xx, Ev, xx, xx, mrm|predcc, (fRS|fRO), END_LIST},
{OP_cmovle, 0x0f4e10, "cmovle", Gv, xx, Ev, xx, xx, mrm|predcc, (fRS|fRO|fRZ), END_LIST},
{OP_cmovnle,0x0f4f10, "cmovnle",Gv, xx, Ev, xx, xx, mrm|predcc, (fRS|fRO|fRZ), END_LIST},
/* 50 */
{PREFIX_EXT, 0x0f5010, "(prefix ext 16)", xx, xx, xx, xx, xx, mrm, x, 16},
{PREFIX_EXT, 0x0f5110, "(prefix ext 17)", xx, xx, xx, xx, xx, mrm, x, 17},
{PREFIX_EXT, 0x0f5210, "(prefix ext 18)", xx, xx, xx, xx, xx, mrm, x, 18},
{PREFIX_EXT, 0x0f5310, "(prefix ext 19)", xx, xx, xx, xx, xx, mrm, x, 19},
{PREFIX_EXT, 0x0f5410, "(prefix ext 20)", xx, xx, xx, xx, xx, mrm, x, 20},
{PREFIX_EXT, 0x0f5510, "(prefix ext 21)", xx, xx, xx, xx, xx, mrm, x, 21},
{PREFIX_EXT, 0x0f5610, "(prefix ext 22)", xx, xx, xx, xx, xx, mrm, x, 22},
{PREFIX_EXT, 0x0f5710, "(prefix ext 23)", xx, xx, xx, xx, xx, mrm, x, 23},
/* 58 */
{PREFIX_EXT, 0x0f5810, "(prefix ext 24)", xx, xx, xx, xx, xx, mrm, x, 24},
{PREFIX_EXT, 0x0f5910, "(prefix ext 25)", xx, xx, xx, xx, xx, mrm, x, 25},
{PREFIX_EXT, 0x0f5a10, "(prefix ext 26)", xx, xx, xx, xx, xx, mrm, x, 26},
{PREFIX_EXT, 0x0f5b10, "(prefix ext 27)", xx, xx, xx, xx, xx, mrm, x, 27},
{PREFIX_EXT, 0x0f5c10, "(prefix ext 28)", xx, xx, xx, xx, xx, mrm, x, 28},
{PREFIX_EXT, 0x0f5d10, "(prefix ext 29)", xx, xx, xx, xx, xx, mrm, x, 29},
{PREFIX_EXT, 0x0f5e10, "(prefix ext 30)", xx, xx, xx, xx, xx, mrm, x, 30},
{PREFIX_EXT, 0x0f5f10, "(prefix ext 31)", xx, xx, xx, xx, xx, mrm, x, 31},
/* 60 */
{PREFIX_EXT, 0x0f6010, "(prefix ext 32)", xx, xx, xx, xx, xx, mrm, x, 32},
{PREFIX_EXT, 0x0f6110, "(prefix ext 33)", xx, xx, xx, xx, xx, mrm, x, 33},
{PREFIX_EXT, 0x0f6210, "(prefix ext 34)", xx, xx, xx, xx, xx, mrm, x, 34},
{PREFIX_EXT, 0x0f6310, "(prefix ext 35)", xx, xx, xx, xx, xx, mrm, x, 35},
{PREFIX_EXT, 0x0f6410, "(prefix ext 36)", xx, xx, xx, xx, xx, mrm, x, 36},
{PREFIX_EXT, 0x0f6510, "(prefix ext 37)", xx, xx, xx, xx, xx, mrm, x, 37},
{PREFIX_EXT, 0x0f6610, "(prefix ext 38)", xx, xx, xx, xx, xx, mrm, x, 38},
{PREFIX_EXT, 0x0f6710, "(prefix ext 39)", xx, xx, xx, xx, xx, mrm, x, 39},
/* 68 */
{PREFIX_EXT, 0x0f6810, "(prefix ext 40)", xx, xx, xx, xx, xx, mrm, x, 40},
{PREFIX_EXT, 0x0f6910, "(prefix ext 41)", xx, xx, xx, xx, xx, mrm, x, 41},
{PREFIX_EXT, 0x0f6a10, "(prefix ext 42)", xx, xx, xx, xx, xx, mrm, x, 42},
{PREFIX_EXT, 0x0f6b10, "(prefix ext 43)", xx, xx, xx, xx, xx, mrm, x, 43},
{PREFIX_EXT, 0x0f6c10, "(prefix ext 44)", xx, xx, xx, xx, xx, mrm, x, 44},
{PREFIX_EXT, 0x0f6d10, "(prefix ext 45)", xx, xx, xx, xx, xx, mrm, x, 45},
{PREFIX_EXT, 0x0f6e10, "(prefix ext 46)", xx, xx, xx, xx, xx, mrm, x, 46},
{PREFIX_EXT, 0x0f6f10, "(prefix ext 112)", xx, xx, xx, xx, xx, mrm, x, 112},
/* 70 */
{PREFIX_EXT, 0x0f7010, "(prefix ext 47)", xx, xx, xx, xx, xx, mrm, x, 47},
{EXTENSION, 0x0f7110, "(group 12)", xx, xx, xx, xx, xx, mrm, x, 19},
{EXTENSION, 0x0f7210, "(group 13)", xx, xx, xx, xx, xx, mrm, x, 20},
{EXTENSION, 0x0f7310, "(group 14)", xx, xx, xx, xx, xx, mrm, x, 21},
{PREFIX_EXT, 0x0f7410, "(prefix ext 48)", xx, xx, xx, xx, xx, mrm, x, 48},
{PREFIX_EXT, 0x0f7510, "(prefix ext 49)", xx, xx, xx, xx, xx, mrm, x, 49},
{PREFIX_EXT, 0x0f7610, "(prefix ext 50)", xx, xx, xx, xx, xx, mrm, x, 50},
{VEX_L_EXT, 0x0f7710, "(vex L ext 0)", xx, xx, xx, xx, xx, no, x, 0},
/* 78 */
{PREFIX_EXT, 0x0f7810, "(prefix ext 134)", xx, xx, xx, xx, xx, mrm, x, 134},
{PREFIX_EXT, 0x0f7910, "(prefix ext 135)", xx, xx, xx, xx, xx, mrm, x, 135},
{INVALID, 0x0f7a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7c10, "(prefix ext 114)", xx, xx, xx, xx, xx, mrm, x, 114},
{PREFIX_EXT, 0x0f7d10, "(prefix ext 115)", xx, xx, xx, xx, xx, mrm, x, 115},
{PREFIX_EXT, 0x0f7e10, "(prefix ext 51)", xx, xx, xx, xx, xx, mrm, x, 51},
{PREFIX_EXT, 0x0f7f10, "(prefix ext 113)", xx, xx, xx, xx, xx, mrm, x, 113},
/* 80 */
{OP_jo, 0x0f8010, "jo", xx, xx, Jz, xx, xx, no, fRO, END_LIST},
{OP_jno, 0x0f8110, "jno", xx, xx, Jz, xx, xx, no, fRO, END_LIST},
{OP_jb, 0x0f8210, "jb", xx, xx, Jz, xx, xx, no, fRC, END_LIST},
{OP_jnb, 0x0f8310, "jnb", xx, xx, Jz, xx, xx, no, fRC, END_LIST},
{OP_jz, 0x0f8410, "jz", xx, xx, Jz, xx, xx, no, fRZ, END_LIST},
{OP_jnz, 0x0f8510, "jnz", xx, xx, Jz, xx, xx, no, fRZ, END_LIST},
{OP_jbe, 0x0f8610, "jbe", xx, xx, Jz, xx, xx, no, (fRC|fRZ), END_LIST},
{OP_jnbe,0x0f8710, "jnbe",xx, xx, Jz, xx, xx, no, (fRC|fRZ), END_LIST},
/* 88 */
{OP_js, 0x0f8810, "js", xx, xx, Jz, xx, xx, no, fRS, END_LIST},
{OP_jns, 0x0f8910, "jns", xx, xx, Jz, xx, xx, no, fRS, END_LIST},
{OP_jp, 0x0f8a10, "jp", xx, xx, Jz, xx, xx, no, fRP, END_LIST},
{OP_jnp, 0x0f8b10, "jnp", xx, xx, Jz, xx, xx, no, fRP, END_LIST},
{OP_jl, 0x0f8c10, "jl", xx, xx, Jz, xx, xx, no, (fRS|fRO), END_LIST},
{OP_jnl, 0x0f8d10, "jnl", xx, xx, Jz, xx, xx, no, (fRS|fRO), END_LIST},
{OP_jle, 0x0f8e10, "jle", xx, xx, Jz, xx, xx, no, (fRS|fRO|fRZ), END_LIST},
{OP_jnle,0x0f8f10, "jnle",xx, xx, Jz, xx, xx, no, (fRS|fRO|fRZ), END_LIST},
/* 90 */
{E_VEX_EXT, 0x0f9010, "(e_vex ext 79)", xx, xx, xx, xx, xx, mrm, x, 79},
{E_VEX_EXT, 0x0f9110, "(e_vex ext 80)", xx, xx, xx, xx, xx, mrm, x, 80},
{E_VEX_EXT, 0x0f9210, "(e_vex ext 81)", xx, xx, xx, xx, xx, mrm, x, 81},
{E_VEX_EXT, 0x0f9310, "(e_vex ext 82)", xx, xx, xx, xx, xx, mrm, x, 82},
{OP_setz, 0x0f9410, "setz", Eb, xx, xx, xx, xx, mrm, fRZ, END_LIST},
{OP_setnz, 0x0f9510, "setnz", Eb, xx, xx, xx, xx, mrm, fRZ, END_LIST},
{OP_setbe, 0x0f9610, "setbe", Eb, xx, xx, xx, xx, mrm, (fRC|fRZ), END_LIST},
{OP_setnbe,0x0f9710, "setnbe",Eb, xx, xx, xx, xx, mrm, (fRC|fRZ), END_LIST},
/* 98 */
{E_VEX_EXT, 0x0f9810, "(e_vex ext 91)", xx, xx, xx, xx, xx, mrm, x, 91},
{E_VEX_EXT, 0x0f9910, "(e_vex ext 92)", xx, xx, xx, xx, xx, mrm, x, 92},
{OP_setp, 0x0f9a10, "setp", Eb, xx, xx, xx, xx, mrm, fRP, END_LIST},
{OP_setnp, 0x0f9b10, "setnp", Eb, xx, xx, xx, xx, mrm, fRP, END_LIST},
{OP_setl, 0x0f9c10, "setl", Eb, xx, xx, xx, xx, mrm, (fRS|fRO), END_LIST},
{OP_setnl, 0x0f9d10, "setnl", Eb, xx, xx, xx, xx, mrm, (fRS|fRO), END_LIST},
{OP_setle, 0x0f9e10, "setle", Eb, xx, xx, xx, xx, mrm, (fRS|fRO|fRZ), END_LIST},
{OP_setnle,0x0f9f10, "setnle",Eb, xx, xx, xx, xx, mrm, (fRS|fRO|fRZ), END_LIST},
/* a0 */
{OP_push, 0x0fa010, "push", xsp, i_xSPo1, fs, xsp, xx, no, x, tsb[0xa8]},
{OP_pop, 0x0fa110, "pop", fs, xsp, xsp, i_xSP, xx, no, x, tsb[0xa9]},
{OP_cpuid, 0x0fa210, "cpuid", eax, ebx, eax, ecx, xx, xop, x, exop[0x06]},
{OP_bt, 0x0fa310, "bt", xx, xx, Ev, Gv, xx, mrm, fW6, tex[15][4]},
{OP_shld, 0x0fa410, "shld", Ev, xx, Gv, Ib, Ev, mrm, fW6, tsb[0xa5]},
{OP_shld, 0x0fa510, "shld", Ev, xx, Gv, cl, Ev, mrm, fW6, END_LIST},
{INVALID, 0x0fa610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fa710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* a8 */
{OP_push, 0x0fa810, "push", xsp, i_xSPo1, gs, xsp, xx, no, x, END_LIST},
{OP_pop, 0x0fa910, "pop", gs, xsp, xsp, i_xSP, xx, no, x, END_LIST},
{OP_rsm, 0x0faa10, "rsm", xx, xx, xx, xx, xx, no, fWX, END_LIST},
{OP_bts, 0x0fab10, "bts", Ev, xx, Gv, Ev, xx, mrm, fW6, tex[15][5]},
{OP_shrd, 0x0fac10, "shrd", Ev, xx, Gv, Ib, Ev, mrm, fW6, tsb[0xad]},
{OP_shrd, 0x0fad10, "shrd", Ev, xx, Gv, cl, Ev, mrm, fW6, END_LIST},
{EXTENSION, 0x0fae10, "(group 15)", xx, xx, xx, xx, xx, mrm, x, 22},
{OP_imul, 0x0faf10, "imul", Gv, xx, Ev, Gv, xx, mrm, fW6, tfb[0x69]},
/* b0 */
{OP_cmpxchg, 0x0fb010, "cmpxchg", Eb, al, Gb, Eb, al, mrm, fW6, END_LIST},
{OP_cmpxchg, 0x0fb110, "cmpxchg", Ev, eAX, Gv, Ev, eAX, mrm, fW6, tsb[0xb0]},
{OP_lss, 0x0fb210, "lss", Gv, ss, Mp, xx, xx, mrm, x, END_LIST},
{OP_btr, 0x0fb310, "btr", Ev, xx, Gv, Ev, xx, mrm, fW6, tex[15][6]},
{OP_lfs, 0x0fb410, "lfs", Gv, fs, Mp, xx, xx, mrm, x, END_LIST},
{OP_lgs, 0x0fb510, "lgs", Gv, gs, Mp, xx, xx, mrm, x, END_LIST},
{OP_movzx, 0x0fb610, "movzx", Gv, xx, Eb, xx, xx, mrm, x, END_LIST},
{OP_movzx, 0x0fb710, "movzx", Gv, xx, Ew, xx, xx, mrm, x, tsb[0xb6]},
/* b8 */
{OP_popcnt, 0xf30fb810, "popcnt", Gv, xx, Ev, xx, xx, mrm|reqp, fW6, END_LIST},
/* This is Group 10, but all identical (ud2b) so no reason to split opcode by /reg */
{OP_ud2b, 0x0fb910, "ud2b", xx, xx, xx, xx, xx, no, x, END_LIST},
{EXTENSION, 0x0fba10, "(group 8)", xx, xx, xx, xx, xx, mrm, x, 15},
{OP_btc, 0x0fbb10, "btc", Ev, xx, Gv, Ev, xx, mrm, fW6, tex[15][7]},
{PREFIX_EXT, 0x0fbc10, "(prefix ext 140)", xx, xx, xx, xx, xx, mrm, x, 140},
{PREFIX_EXT, 0x0fbd10, "(prefix ext 136)", xx, xx, xx, xx, xx, mrm, x, 136},
{OP_movsx, 0x0fbe10, "movsx", Gv, xx, Eb, xx, xx, mrm, x, END_LIST},
{OP_movsx, 0x0fbf10, "movsx", Gv, xx, Ew, xx, xx, mrm, x, tsb[0xbe]},
/* c0 */
{OP_xadd, 0x0fc010, "xadd", Eb, Gb, Eb, Gb, xx, mrm, fW6, END_LIST},
{OP_xadd, 0x0fc110, "xadd", Ev, Gv, Ev, Gv, xx, mrm, fW6, tsb[0xc0]},
{PREFIX_EXT, 0x0fc210, "(prefix ext 52)", xx, xx, xx, xx, xx, mrm, x, 52},
{OP_movnti, 0x0fc310, "movnti", Md_q, xx, Gd_q, xx, xx, mrm, x, END_LIST},
{PREFIX_EXT, 0x0fc410, "(prefix ext 53)", xx, xx, xx, xx, xx, mrm, x, 53},
{PREFIX_EXT, 0x0fc510, "(prefix ext 54)", xx, xx, xx, xx, xx, mrm, x, 54},
{PREFIX_EXT, 0x0fc610, "(prefix ext 55)", xx, xx, xx, xx, xx, mrm, x, 55},
{EXTENSION, 0x0fc710, "(group 9)", xx, xx, xx, xx, xx, mrm, x, 16},
/* c8 */
{OP_bswap, 0x0fc810, "bswap", uAX_x, xx, uAX_x, xx, xx, no, x, tsb[0xc9]},
{OP_bswap, 0x0fc910, "bswap", uCX_x, xx, uCX_x, xx, xx, no, x, tsb[0xca]},
{OP_bswap, 0x0fca10, "bswap", uDX_x, xx, uDX_x, xx, xx, no, x, tsb[0xcb]},
{OP_bswap, 0x0fcb10, "bswap", uBX_x, xx, uBX_x, xx, xx, no, x, tsb[0xcc]},
{OP_bswap, 0x0fcc10, "bswap", uSP_x, xx, uSP_x, xx, xx, no, x, tsb[0xcd]},
{OP_bswap, 0x0fcd10, "bswap", uBP_x, xx, uBP_x, xx, xx, no, x, tsb[0xce]},
{OP_bswap, 0x0fce10, "bswap", uSI_x, xx, uSI_x, xx, xx, no, x, tsb[0xcf]},
{OP_bswap, 0x0fcf10, "bswap", uDI_x, xx, uDI_x, xx, xx, no, x, END_LIST},
/* d0 */
{PREFIX_EXT, 0x0fd010, "(prefix ext 116)", xx, xx, xx, xx, xx, mrm, x, 116},
{PREFIX_EXT, 0x0fd110, "(prefix ext 56)", xx, xx, xx, xx, xx, mrm, x, 56},
{PREFIX_EXT, 0x0fd210, "(prefix ext 57)", xx, xx, xx, xx, xx, mrm, x, 57},
{PREFIX_EXT, 0x0fd310, "(prefix ext 58)", xx, xx, xx, xx, xx, mrm, x, 58},
{PREFIX_EXT, 0x0fd410, "(prefix ext 59)", xx, xx, xx, xx, xx, mrm, x, 59},
{PREFIX_EXT, 0x0fd510, "(prefix ext 60)", xx, xx, xx, xx, xx, mrm, x, 60},
{PREFIX_EXT, 0x0fd610, "(prefix ext 61)", xx, xx, xx, xx, xx, mrm, x, 61},
{PREFIX_EXT, 0x0fd710, "(prefix ext 62)", xx, xx, xx, xx, xx, mrm, x, 62},
/* d8 */
{PREFIX_EXT, 0x0fd810, "(prefix ext 63)", xx, xx, xx, xx, xx, mrm, x, 63},
{PREFIX_EXT, 0x0fd910, "(prefix ext 64)", xx, xx, xx, xx, xx, mrm, x, 64},
{PREFIX_EXT, 0x0fda10, "(prefix ext 65)", xx, xx, xx, xx, xx, mrm, x, 65},
{PREFIX_EXT, 0x0fdb10, "(prefix ext 66)", xx, xx, xx, xx, xx, mrm, x, 66},
{PREFIX_EXT, 0x0fdc10, "(prefix ext 67)", xx, xx, xx, xx, xx, mrm, x, 67},
{PREFIX_EXT, 0x0fdd10, "(prefix ext 68)", xx, xx, xx, xx, xx, mrm, x, 68},
{PREFIX_EXT, 0x0fde10, "(prefix ext 69)", xx, xx, xx, xx, xx, mrm, x, 69},
{PREFIX_EXT, 0x0fdf10, "(prefix ext 70)", xx, xx, xx, xx, xx, mrm, x, 70},
/* e0 */
{PREFIX_EXT, 0x0fe010, "(prefix ext 71)", xx, xx, xx, xx, xx, mrm, x, 71},
{PREFIX_EXT, 0x0fe110, "(prefix ext 72)", xx, xx, xx, xx, xx, mrm, x, 72},
{PREFIX_EXT, 0x0fe210, "(prefix ext 73)", xx, xx, xx, xx, xx, mrm, x, 73},
{PREFIX_EXT, 0x0fe310, "(prefix ext 74)", xx, xx, xx, xx, xx, mrm, x, 74},
{PREFIX_EXT, 0x0fe410, "(prefix ext 75)", xx, xx, xx, xx, xx, mrm, x, 75},
{PREFIX_EXT, 0x0fe510, "(prefix ext 76)", xx, xx, xx, xx, xx, mrm, x, 76},
{PREFIX_EXT, 0x0fe610, "(prefix ext 77)", xx, xx, xx, xx, xx, mrm, x, 77},
{PREFIX_EXT, 0x0fe710, "(prefix ext 78)", xx, xx, xx, xx, xx, mrm, x, 78},
/* e8 */
{PREFIX_EXT, 0x0fe810, "(prefix ext 79)", xx, xx, xx, xx, xx, mrm, x, 79},
{PREFIX_EXT, 0x0fe910, "(prefix ext 80)", xx, xx, xx, xx, xx, mrm, x, 80},
{PREFIX_EXT, 0x0fea10, "(prefix ext 81)", xx, xx, xx, xx, xx, mrm, x, 81},
{PREFIX_EXT, 0x0feb10, "(prefix ext 82)", xx, xx, xx, xx, xx, mrm, x, 82},
{PREFIX_EXT, 0x0fec10, "(prefix ext 83)", xx, xx, xx, xx, xx, mrm, x, 83},
{PREFIX_EXT, 0x0fed10, "(prefix ext 84)", xx, xx, xx, xx, xx, mrm, x, 84},
{PREFIX_EXT, 0x0fee10, "(prefix ext 85)", xx, xx, xx, xx, xx, mrm, x, 85},
{PREFIX_EXT, 0x0fef10, "(prefix ext 86)", xx, xx, xx, xx, xx, mrm, x, 86},
/* f0 */
{PREFIX_EXT, 0x0ff010, "(prefix ext 117)", xx, xx, xx, xx, xx, mrm, x, 117},
{PREFIX_EXT, 0x0ff110, "(prefix ext 87)", xx, xx, xx, xx, xx, mrm, x, 87},
{PREFIX_EXT, 0x0ff210, "(prefix ext 88)", xx, xx, xx, xx, xx, mrm, x, 88},
{PREFIX_EXT, 0x0ff310, "(prefix ext 89)", xx, xx, xx, xx, xx, mrm, x, 89},
{PREFIX_EXT, 0x0ff410, "(prefix ext 90)", xx, xx, xx, xx, xx, mrm, x, 90},
{PREFIX_EXT, 0x0ff510, "(prefix ext 91)", xx, xx, xx, xx, xx, mrm, x, 91},
{PREFIX_EXT, 0x0ff610, "(prefix ext 92)", xx, xx, xx, xx, xx, mrm, x, 92},
{PREFIX_EXT, 0x0ff710, "(prefix ext 93)", xx, xx, xx, xx, xx, mrm, x, 93},
/* f8 */
{PREFIX_EXT, 0x0ff810, "(prefix ext 94)", xx, xx, xx, xx, xx, mrm, x, 94},
{PREFIX_EXT, 0x0ff910, "(prefix ext 95)", xx, xx, xx, xx, xx, mrm, x, 95},
{PREFIX_EXT, 0x0ffa10, "(prefix ext 96)", xx, xx, xx, xx, xx, mrm, x, 96},
{PREFIX_EXT, 0x0ffb10, "(prefix ext 97)", xx, xx, xx, xx, xx, mrm, x, 97},
{PREFIX_EXT, 0x0ffc10, "(prefix ext 98)", xx, xx, xx, xx, xx, mrm, x, 98},
{PREFIX_EXT, 0x0ffd10, "(prefix ext 99)", xx, xx, xx, xx, xx, mrm, x, 99},
{PREFIX_EXT, 0x0ffe10, "(prefix ext 100)", xx, xx, xx, xx, xx, mrm, x, 100},
{INVALID, 0x0fff10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
};
/****************************************************************************
* Opcode extensions
* This is from Table A-6
*/
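/* Each row below holds the eight /reg (modrm reg-field) variants of one
 * extension group; the low digit of the encoding field (e.g. 0x800020..27 for
 * group 1a) records which reg value, 0-7, selects that entry.
 */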
const instr_info_t base_extensions[][8] = {
/* group 1a -- first opcode byte 80: all assumed to have Ib */
{ /* extensions[0] */
{OP_add, 0x800020, "add", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[25][0]},
{OP_or, 0x800021, "or", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[25][1]},
{OP_adc, 0x800022, "adc", Eb, xx, Ib, Eb, xx, mrm, (fW6|fRC), tex[25][2]},
{OP_sbb, 0x800023, "sbb", Eb, xx, Ib, Eb, xx, mrm, (fW6|fRC), tex[25][3]},
{OP_and, 0x800024, "and", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[25][4]},
{OP_sub, 0x800025, "sub", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[25][5]},
{OP_xor, 0x800026, "xor", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[25][6]},
{OP_cmp, 0x800027, "cmp", xx, xx, Eb, Ib, xx, mrm, fW6, tex[25][7]},
},
/* group 1b -- first opcode byte 81: all assumed to have Iz */
{ /* extensions[1] */
{OP_add, 0x810020, "add", Ev, xx, Iz, Ev, xx, mrm, fW6, tex[2][0]},
{OP_or, 0x810021, "or", Ev, xx, Iz, Ev, xx, mrm, fW6, tex[2][1]},
{OP_adc, 0x810022, "adc", Ev, xx, Iz, Ev, xx, mrm, (fW6|fRC), tex[2][2]},
{OP_sbb, 0x810023, "sbb", Ev, xx, Iz, Ev, xx, mrm, (fW6|fRC), tex[2][3]},
{OP_and, 0x810024, "and", Ev, xx, Iz, Ev, xx, mrm, fW6, tex[2][4]},
{OP_sub, 0x810025, "sub", Ev, xx, Iz, Ev, xx, mrm, fW6, tex[2][5]},
{OP_xor, 0x810026, "xor", Ev, xx, Iz, Ev, xx, mrm, fW6, tex[2][6]},
{OP_cmp, 0x810027, "cmp", xx, xx, Ev, Iz, xx, mrm, fW6, tex[2][7]},
},
/* group 1c -- first opcode byte 83 (for 82, see below "group 1c*"):
* all assumed to have Ib */
{ /* extensions[2] */
{OP_add, 0x830020, "add", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[0][0]},
{OP_or, 0x830021, "or", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[0][1]},
{OP_adc, 0x830022, "adc", Ev, xx, Ib, Ev, xx, mrm, (fW6|fRC), tex[0][2]},
{OP_sbb, 0x830023, "sbb", Ev, xx, Ib, Ev, xx, mrm, (fW6|fRC), tex[0][3]},
{OP_and, 0x830024, "and", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[0][4]},
{OP_sub, 0x830025, "sub", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[0][5]},
{OP_xor, 0x830026, "xor", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[0][6]},
{OP_cmp, 0x830027, "cmp", xx, xx, Ev, Ib, xx, mrm, fW6, tex[0][7]},
},
/* group 2a -- first opcode byte c0: all assumed to have Ib */
{ /* extensions[3] */
{OP_rol, 0xc00020, "rol", Eb, xx, Ib, Eb, xx, mrm, (fWC|fWO), tex[5][0]},
{OP_ror, 0xc00021, "ror", Eb, xx, Ib, Eb, xx, mrm, (fWC|fWO), tex[5][1]},
{OP_rcl, 0xc00022, "rcl", Eb, xx, Ib, Eb, xx, mrm, (fRC|fWC|fWO), tex[5][2]},
{OP_rcr, 0xc00023, "rcr", Eb, xx, Ib, Eb, xx, mrm, (fRC|fWC|fWO), tex[5][3]},
{OP_shl, 0xc00024, "shl", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[5][4]},
{OP_shr, 0xc00025, "shr", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[5][5]},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xc00026, "shl", Eb, xx, Ib, Eb, xx, mrm, fW6, END_LIST},
{OP_sar, 0xc00027, "sar", Eb, xx, Ib, Eb, xx, mrm, fW6, tex[5][7]},
},
/* group 2b -- first opcode byte c1: all assumed to have Ib */
{ /* extensions[4] */
{OP_rol, 0xc10020, "rol", Ev, xx, Ib, Ev, xx, mrm, (fWC|fWO), tex[6][0]},
{OP_ror, 0xc10021, "ror", Ev, xx, Ib, Ev, xx, mrm, (fWC|fWO), tex[6][1]},
{OP_rcl, 0xc10022, "rcl", Ev, xx, Ib, Ev, xx, mrm, (fRC|fWC|fWO), tex[6][2]},
{OP_rcr, 0xc10023, "rcr", Ev, xx, Ib, Ev, xx, mrm, (fRC|fWC|fWO), tex[6][3]},
{OP_shl, 0xc10024, "shl", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[6][4]},
{OP_shr, 0xc10025, "shr", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[6][5]},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xc10026, "shl", Ev, xx, Ib, Ev, xx, mrm, fW6, END_LIST},
{OP_sar, 0xc10027, "sar", Ev, xx, Ib, Ev, xx, mrm, fW6, tex[6][7]},
},
/* group 2c -- first opcode byte d0 */
{ /* extensions[5] */
{OP_rol, 0xd00020, "rol", Eb, xx, c1, Eb, xx, mrm, (fWC|fWO), tex[8][0]},
{OP_ror, 0xd00021, "ror", Eb, xx, c1, Eb, xx, mrm, (fWC|fWO), tex[8][1]},
{OP_rcl, 0xd00022, "rcl", Eb, xx, c1, Eb, xx, mrm, (fRC|fWC|fWO), tex[8][2]},
{OP_rcr, 0xd00023, "rcr", Eb, xx, c1, Eb, xx, mrm, (fRC|fWC|fWO), tex[8][3]},
{OP_shl, 0xd00024, "shl", Eb, xx, c1, Eb, xx, mrm, fW6, tex[8][4]},
{OP_shr, 0xd00025, "shr", Eb, xx, c1, Eb, xx, mrm, fW6, tex[8][5]},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xd00026, "shl", Eb, xx, c1, Eb, xx, mrm, fW6, END_LIST},
{OP_sar, 0xd00027, "sar", Eb, xx, c1, Eb, xx, mrm, fW6, tex[8][7]},
},
/* group 2d -- first opcode byte d1 */
{ /* extensions[6] */
{OP_rol, 0xd10020, "rol", Ev, xx, c1, Ev, xx, mrm, (fWC|fWO), tex[3][0]},
{OP_ror, 0xd10021, "ror", Ev, xx, c1, Ev, xx, mrm, (fWC|fWO), tex[3][1]},
{OP_rcl, 0xd10022, "rcl", Ev, xx, c1, Ev, xx, mrm, (fRC|fWC|fWO), tex[3][2]},
{OP_rcr, 0xd10023, "rcr", Ev, xx, c1, Ev, xx, mrm, (fRC|fWC|fWO), tex[3][3]},
{OP_shl, 0xd10024, "shl", Ev, xx, c1, Ev, xx, mrm, fW6, tex[3][4]},
{OP_shr, 0xd10025, "shr", Ev, xx, c1, Ev, xx, mrm, fW6, tex[3][5]},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xd10026, "shl", Ev, xx, c1, Ev, xx, mrm, fW6, END_LIST},
{OP_sar, 0xd10027, "sar", Ev, xx, c1, Ev, xx, mrm, fW6, tex[3][7]},
},
/* group 2e -- first opcode byte d2 */
{ /* extensions[7] */
{OP_rol, 0xd20020, "rol", Eb, xx, cl, Eb, xx, mrm, (fWC|fWO), END_LIST},
{OP_ror, 0xd20021, "ror", Eb, xx, cl, Eb, xx, mrm, (fWC|fWO), END_LIST},
{OP_rcl, 0xd20022, "rcl", Eb, xx, cl, Eb, xx, mrm, (fRC|fWC|fWO), END_LIST},
{OP_rcr, 0xd20023, "rcr", Eb, xx, cl, Eb, xx, mrm, (fRC|fWC|fWO), END_LIST},
{OP_shl, 0xd20024, "shl", Eb, xx, cl, Eb, xx, mrm, fW6, END_LIST},
{OP_shr, 0xd20025, "shr", Eb, xx, cl, Eb, xx, mrm, fW6, END_LIST},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xd20026, "shl", Eb, xx, cl, Eb, xx, mrm, fW6, END_LIST},
{OP_sar, 0xd20027, "sar", Eb, xx, cl, Eb, xx, mrm, fW6, END_LIST},
},
/* group 2f -- first opcode byte d3 */
{ /* extensions[8] */
{OP_rol, 0xd30020, "rol", Ev, xx, cl, Ev, xx, mrm, (fWC|fWO), tex[7][0]},
{OP_ror, 0xd30021, "ror", Ev, xx, cl, Ev, xx, mrm, (fWC|fWO), tex[7][1]},
{OP_rcl, 0xd30022, "rcl", Ev, xx, cl, Ev, xx, mrm, (fRC|fWC|fWO), tex[7][2]},
{OP_rcr, 0xd30023, "rcr", Ev, xx, cl, Ev, xx, mrm, (fRC|fWC|fWO), tex[7][3]},
{OP_shl, 0xd30024, "shl", Ev, xx, cl, Ev, xx, mrm, fW6, tex[7][4]},
{OP_shr, 0xd30025, "shr", Ev, xx, cl, Ev, xx, mrm, fW6, tex[7][5]},
/* PR 332254: /6 is an alias for /4; we do not add to encoding chain though */
{OP_shl, 0xd30026, "shl", Ev, xx, cl, Ev, xx, mrm, fW6, END_LIST},
{OP_sar, 0xd30027, "sar", Ev, xx, cl, Ev, xx, mrm, fW6, tex[7][7]},
},
/* group 3a -- first opcode byte f6 */
{ /* extensions[9] */
{OP_test, 0xf60020, "test", xx, xx, Eb, Ib, xx, mrm, fW6, END_LIST},
/* PR 332254: /1 is an alias for /0; we do not add to encoding chain though */
{OP_test, 0xf60021, "test", xx, xx, Eb, Ib, xx, mrm, fW6, END_LIST},
{OP_not, 0xf60022, "not", Eb, xx, Eb, xx, xx, mrm, x, END_LIST},
{OP_neg, 0xf60023, "neg", Eb, xx, Eb, xx, xx, mrm, fW6, END_LIST},
{OP_mul, 0xf60024, "mul", ax, xx, Eb, al, xx, mrm, fW6, END_LIST},
{OP_imul, 0xf60025, "imul", ax, xx, Eb, al, xx, mrm, fW6, tsb[0xaf]},
{OP_div, 0xf60026, "div", ah, al, Eb, ax, xx, mrm, fW6, END_LIST},
{OP_idiv, 0xf60027, "idiv", ah, al, Eb, ax, xx, mrm, fW6, END_LIST},
},
/* group 3b -- first opcode byte f7 */
{ /* extensions[10] */
{OP_test, 0xf70020, "test", xx, xx, Ev, Iz, xx, mrm, fW6, tex[9][0]},
/* PR 332254: /1 is an alias for /0; we do not add to encoding chain though */
{OP_test, 0xf70021, "test", xx, xx, Ev, Iz, xx, mrm, fW6, END_LIST},
{OP_not, 0xf70022, "not", Ev, xx, Ev, xx, xx, mrm, x, tex[9][2]},
{OP_neg, 0xf70023, "neg", Ev, xx, Ev, xx, xx, mrm, fW6, tex[9][3]},
{OP_mul, 0xf70024, "mul", eDX, eAX, Ev, eAX, xx, mrm, fW6, tex[9][4]},
{OP_imul, 0xf70025, "imul", eDX, eAX, Ev, eAX, xx, mrm, fW6, tex[9][5]},
{OP_div, 0xf70026, "div", eDX, eAX, Ev, eDX, eAX, mrm, fW6, tex[9][6]},
{OP_idiv, 0xf70027, "idiv", eDX, eAX, Ev, eDX, eAX, mrm, fW6, tex[9][7]},
},
/* group 4 (first byte fe) */
{ /* extensions[11] */
{OP_inc, 0xfe0020, "inc", Eb, xx, Eb, xx, xx, mrm, (fW6&(~fWC)), END_LIST},
{OP_dec, 0xfe0021, "dec", Eb, xx, Eb, xx, xx, mrm, (fW6&(~fWC)), END_LIST},
{INVALID, 0xfe0022, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xfe0023, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xfe0024, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xfe0025, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xfe0026, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xfe0027, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 5 (first byte ff) */
{ /* extensions[12] */
{OP_inc, 0xff0020, "inc", Ev, xx, Ev, xx, xx, mrm, (fW6&(~fWC)), tex[11][0]},
{OP_dec, 0xff0021, "dec", Ev, xx, Ev, xx, xx, mrm, (fW6&(~fWC)), tex[11][1]},
{OP_call_ind, 0xff0022, "call", xsp, i_iSPo1, i_Exi, xsp, xx, mrm, x, END_LIST},
/* Note how a far call's stack operand size matches far ret rather than call */
{OP_call_far_ind, 0xff0023, "lcall", xsp, i_vSPo2, i_Ep, xsp, xx, mrm, x, END_LIST},
{OP_jmp_ind, 0xff0024, "jmp", xx, xx, i_Exi, xx, xx, mrm, x, END_LIST},
{OP_jmp_far_ind, 0xff0025, "ljmp", xx, xx, i_Ep, xx, xx, mrm, x, END_LIST},
{OP_push, 0xff0026, "push", xsp, i_xSPo1, Esv, xsp, xx, mrm, x, tfb[0x06]},
{INVALID, 0xff0027, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 6 (first bytes 0f 00) */
{ /* extensions[13] */
{OP_sldt, 0x0f0030, "sldt", Ew, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_str, 0x0f0031, "str", Ew, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_lldt, 0x0f0032, "lldt", xx, xx, Ew, xx, xx, mrm, x, END_LIST},
{OP_ltr, 0x0f0033, "ltr", xx, xx, Ew, xx, xx, mrm, x, END_LIST},
{OP_verr, 0x0f0034, "verr", xx, xx, Ew, xx, xx, mrm, fWZ, END_LIST},
{OP_verw, 0x0f0035, "verw", xx, xx, Ew, xx, xx, mrm, fWZ, END_LIST},
{INVALID, 0x0f0036, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0037, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
},
/* group 7 (first bytes 0f 01) */
{ /* extensions[14] */
{MOD_EXT, 0x0f0130, "(group 7 mod ext 0)", xx, xx, xx, xx, xx, no, x, 0},
{MOD_EXT, 0x0f0131, "(group 7 mod ext 1)", xx, xx, xx, xx, xx, no, x, 1},
{MOD_EXT, 0x0f0132, "(group 7 mod ext 5)", xx, xx, xx, xx, xx, no, x, 5},
{MOD_EXT, 0x0f0133, "(group 7 mod ext 4)", xx, xx, xx, xx, xx, no, x, 4},
{OP_smsw, 0x0f0134, "smsw", Ew, xx, xx, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f0135, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_lmsw, 0x0f0136, "lmsw", xx, xx, Ew, xx, xx, mrm, x, END_LIST},
{MOD_EXT, 0x0f0137, "(group 7 mod ext 2)", xx, xx, xx, xx, xx, no, x, 2},
},
/* group 8 (first bytes 0f ba): all assumed to have Ib */
{ /* extensions[15] */
{INVALID, 0x0fba30, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fba31, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fba32, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fba33, "(bad)",xx, xx, xx, xx, xx, no, x, NA},
{OP_bt, 0x0fba34, "bt", xx, xx, Ev, Ib, xx, mrm, fW6, END_LIST},
{OP_bts, 0x0fba35, "bts", Ev, xx, Ib, Ev, xx, mrm, fW6, END_LIST},
{OP_btr, 0x0fba36, "btr", Ev, xx, Ib, Ev, xx, mrm, fW6, END_LIST},
{OP_btc, 0x0fba37, "btc", Ev, xx, Ib, Ev, xx, mrm, fW6, END_LIST},
},
/* group 9 (first bytes 0f c7) */
{ /* extensions[16] */
{INVALID, 0x0fc730, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_cmpxchg8b, 0x0fc731, "cmpxchg8b", Mq_dq, eAX, Mq_dq, eAX, eDX, mrm_xop, fWZ, exop[0x07]},/*"cmpxchg16b" w/ rex.w*/
{INVALID, 0x0fc732, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fc733, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{REX_W_EXT, 0x0fc734, "(rex.w ext 5)", xx, xx, xx, xx, xx, mrm, x, 5},
{INVALID, 0x0fc735, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{MOD_EXT, 0x0fc736, "(group 9 mod ext 12)", xx, xx, xx, xx, xx, mrm, x, 12},
{MOD_EXT, 0x0fc737, "(group 9 mod ext 13)", xx, xx, xx, xx, xx, mrm, x, 13},
},
/* group 10 is all ud2b; since the entries are identical we do not use a separate table for it */
/* group 11a (first byte c6) */
{ /* extensions[17] */
{OP_mov_st, 0xc60020, "mov", Eb, xx, Ib, xx, xx, mrm, x, END_LIST},
{INVALID, 0xc60021, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc60022, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc60023, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc60024, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc60025, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc60026, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* XXX i#1314: this also sets eip */
{OP_xabort, 0xf8c60067, "xabort", eax, xx, Ib, xx, xx, mrm, x, END_LIST},
},
/* group 11b (first byte c7) */
{ /* extensions[18] */
/* PR 250397: be aware that mov_imm shares this tail end of mov_st templates */
{OP_mov_st, 0xc70020, "mov", Ev, xx, Iz, xx, xx, mrm, x, tex[17][0]},
{INVALID, 0xc70021, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc70022, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc70023, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc70024, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc70025, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xc70026, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_xbegin, 0xf8c70067, "xbegin", xx, xx, Jz, xx, xx, mrm, x, END_LIST},
},
/* group 12 (first bytes 0f 71): all assumed to have Ib */
{ /* extensions[19] */
{INVALID, 0x0f7130, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7132, "(prefix ext 104)", xx, xx, xx, xx, xx, no, x, 104},
{INVALID, 0x0f7133, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7134, "(prefix ext 105)", xx, xx, xx, xx, xx, no, x, 105},
{INVALID, 0x0f7135, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7136, "(prefix ext 106)", xx, xx, xx, xx, xx, no, x, 106},
{INVALID, 0x0f7137, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 13 (first bytes 0f 72): all assumed to have Ib */
{ /* extensions[20] */
{INVALID, 0x0f7230, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7231, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7232, "(prefix ext 107)", xx, xx, xx, xx, xx, no, x, 107},
{INVALID, 0x0f7233, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7234, "(prefix ext 108)", xx, xx, xx, xx, xx, no, x, 108},
{INVALID, 0x0f7235, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7236, "(prefix ext 109)", xx, xx, xx, xx, xx, no, x, 109},
{INVALID, 0x0f7237, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 14 (first bytes 0f 73): all assumed to have Ib */
{ /* extensions[21] */
{INVALID, 0x0f7330, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7331, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7332, "(prefix ext 110)", xx, xx, xx, xx, xx, no, x, 110},
{PREFIX_EXT, 0x0f7333, "(prefix ext 101)", xx, xx, xx, xx, xx, no, x, 101},
{INVALID, 0x0f7334, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7335, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{PREFIX_EXT, 0x0f7336, "(prefix ext 111)", xx, xx, xx, xx, xx, no, x, 111},
{PREFIX_EXT, 0x0f7337, "(prefix ext 102)", xx, xx, xx, xx, xx, no, x, 102},
},
/* group 15 (first bytes 0f ae) */
{ /* extensions[22] */
/* Intel tables imply they may add opcodes in the mod=3 (non-mem) space in future */
{MOD_EXT, 0x0fae30, "(group 15 mod ext 14)", xx, xx, xx, xx, xx, mrm, x, 14},
{MOD_EXT, 0x0fae31, "(group 15 mod ext 15)", xx, xx, xx, xx, xx, mrm, x, 15},
{MOD_EXT, 0x0fae32, "(group 15 mod ext 16)", xx, xx, xx, xx, xx, mrm, x, 16},
{MOD_EXT, 0x0fae33, "(group 15 mod ext 17)", xx, xx, xx, xx, xx, mrm, x, 17},
{REX_W_EXT, 0x0fae34, "(rex.w ext 2)", xx, xx, xx, xx, xx, mrm, x, 2},
{MOD_EXT, 0x0fae35, "(group 15 mod ext 6)", xx, xx, xx, xx, xx, no, x, 6},
{MOD_EXT, 0x0fae36, "(group 15 mod ext 7)", xx, xx, xx, xx, xx, no, x, 7},
{MOD_EXT, 0x0fae37, "(group 15 mod ext 3)", xx, xx, xx, xx, xx, no, x, 3},
},
/* group 16 (first bytes 0f 18) */
{ /* extensions[23] */
/* Intel tables imply they may add opcodes in the mod=3 (non-mem) space in future */
{OP_prefetchnta, 0x0f1830, "prefetchnta", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_prefetcht0, 0x0f1831, "prefetcht0", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_prefetcht1, 0x0f1832, "prefetcht1", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_prefetcht2, 0x0f1833, "prefetcht2", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1834, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1835, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1836, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
{OP_nop_modrm, 0x0f1837, "nop", xx, xx, Ed, xx, xx, mrm, x, END_LIST},
},
/* group AMD (first bytes 0f 0d) */
{ /* extensions[24] */
{OP_prefetch, 0x0f0d30, "prefetch", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_prefetchw, 0x0f0d31, "prefetchw", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f0d32, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0d33, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0d34, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0d35, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0d36, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0d37, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 1c* -- first opcode byte 82
* see PR 235092 for the discrepancies in what 0x82 should be: empirically
* and according to recent Intel manuals it matches 0x80, not 0x83 (as old
* Intel manuals implied) or invalid (as gnu tools claim).
* not linked into any encode chain.
*/
{ /* extensions[25]: all assumed to have Ib */
{OP_add, 0x820020, "add", Eb, xx, Ib, Eb, xx, mrm|i64, fW6, END_LIST},
{OP_or, 0x820021, "or", Eb, xx, Ib, Eb, xx, mrm|i64, fW6, END_LIST},
{OP_adc, 0x820022, "adc", Eb, xx, Ib, Eb, xx, mrm|i64, (fW6|fRC), END_LIST},
{OP_sbb, 0x820023, "sbb", Eb, xx, Ib, Eb, xx, mrm|i64, (fW6|fRC), END_LIST},
{OP_and, 0x820024, "and", Eb, xx, Ib, Eb, xx, mrm|i64, fW6, END_LIST},
{OP_sub, 0x820025, "sub", Eb, xx, Ib, Eb, xx, mrm|i64, fW6, END_LIST},
{OP_xor, 0x820026, "xor", Eb, xx, Ib, Eb, xx, mrm|i64, fW6, END_LIST},
{OP_cmp, 0x820027, "cmp", xx, xx, Eb, Ib, xx, mrm|i64, fW6, END_LIST},
},
/* group 1d (Intel now calling Group 1A) -- first opcode byte 8f */
{ /* extensions[26] */
{OP_pop, 0x8f0020, "pop", Esv, xsp, xsp, i_xSP, xx, mrm, x, tfb[0x17]},
/* we shouldn't ever get here for these, as this becomes an XOP prefix */
{INVALID, 0x8f0021, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0022, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0023, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0024, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0025, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0026, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x8f0027, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* XOP group 1 */
{ /* extensions[27] */
{INVALID, 0x090138, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_blcfill, 0x090139, "blcfill", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blsfill, 0x09013a, "blsfill", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blcs, 0x09013b, "blcs", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_tzmsk, 0x09013c, "tzmsk", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blcic, 0x09013d, "blcic", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blsic, 0x09013e, "blsic", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_t1mskc, 0x09013f, "t1mskc", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
},
/* XOP group 2 */
{ /* extensions[28] */
{INVALID, 0x090238, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_blcmsk, 0x090239, "blcmsk",By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0x09023a, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09023b, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09023c, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09023d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_blci, 0x09023e, "blci", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0x09023f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* XOP group 3 */
{ /* extensions[29] */
/* XXX i#1311: these instrs implicitly write to memory which we should
* find a way to encode into the IR.
*/
{OP_llwpcb, 0x091238, "llwpcb", xx, xx, Ry, xx, xx, mrm|vex, x, END_LIST},
{OP_slwpcb, 0x091239, "slwpcb", Ry, xx, xx, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0x09123a, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09123b, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09123c, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09123d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09123e, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x09123f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* XOP group 4: all assumed to have a 4-byte immediate by xop_a_extra[] */
{ /* extensions[30] */
/* XXX i#1311: these instrs implicitly write to memory which we should
* find a way to encode into the IR.
*/
{OP_lwpins, 0x0a1238, "lwpins", xx, xx, By, Ed, Id, mrm|vex, fWC, END_LIST},
{OP_lwpval, 0x0a1239, "lwpval", xx, xx, By, Ed, Id, mrm|vex, x, END_LIST},
{INVALID, 0x0a123a, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0a123b, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0a123c, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0a123d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0a123e, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0a123f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* group 17 */
{ /* extensions[31] */
{INVALID, 0x38f338, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_blsr, 0x38f339, "blsr", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blsmsk, 0x38f33a, "blsmsk", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{OP_blsi, 0x38f33b, "blsi", By, xx, Ey, xx, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0x38f33c, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x38f33d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x38f33e, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x38f33f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
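/* A minimal sketch of how one row in the group tables above is selected
 * (an illustrative assumption, not a definitive statement of the decoder):
 * the ModRM "reg" field (bits 5:3, value 0-7) picks the row within a group,
 * e.g. for group 6 (first bytes 0f 00), which is extensions[13] above:
 *     info = &extensions[13][(modrm >> 3) & 0x7];
 */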
/****************************************************************************
* Two-byte instructions that differ depending on presence of
* prefixes, indexed in this order:
* none, 0xf3, 0x66, 0xf2
* A second set is used for vex-encoded instructions, indexed in the
* same order by prefix.
* A third set is used for evex-encoded instructions, indexed in the
* same order by prefix.
*
* N.B.: to avoid having a full entry here when there is only one
* valid opcode prefix, use |reqp in the original entry instead of
* pointing to this table.
*/
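/* Column layout per row, restating the ordering described above as a reading
 * aid (the bracketed indices are an inference from that ordering):
 *   [0]..[3]   non-VEX: none, 0xf3, 0x66, 0xf2
 *   [4]..[7]   VEX:     none, 0xf3, 0x66, 0xf2
 *   [8]..[11]  EVEX:    none, 0xf3, 0x66, 0xf2
 */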
const instr_info_t prefix_extensions[][12] = {
/* prefix extension 0 */
{
{OP_movups, 0x0f1010, "movups", Vps, xx, Wps, xx, xx, mrm, x, tpe[1][0]},
{MOD_EXT, 0xf30f1010, "(mod ext 18)", xx, xx, xx, xx, xx, mrm, x, 18},
{OP_movupd, 0x660f1010, "movupd", Vpd, xx, Wpd, xx, xx, mrm, x, tpe[1][2]},
{MOD_EXT, 0xf20f1010, "(mod ext 19)", xx, xx, xx, xx, xx, mrm, x, 19},
{OP_vmovups, 0x0f1010, "vmovups", Vvs, xx, Wvs, xx, xx, mrm|vex, x, tpe[1][4]},
{MOD_EXT, 0xf30f1010, "(mod ext 8)", xx, xx, xx, xx, xx, mrm|vex, x, 8},
{OP_vmovupd, 0x660f1010, "vmovupd", Vvd, xx, Wvd, xx, xx, mrm|vex, x, tpe[1][6]},
{MOD_EXT, 0xf20f1010, "(mod ext 9)", xx, xx, xx, xx, xx, mrm|vex, x, 9},
{EVEX_W_EXT, 0x0f1010, "(evex_W ext 0)", xx, xx, xx, xx, xx, mrm|evex, x, 0},
{MOD_EXT, 0xf30f1010, "(mod ext 20)", xx, xx, xx, xx, xx, mrm|evex, x, 20},
{EVEX_W_EXT, 0x660f1010, "(evex_W ext 2)", xx, xx, xx, xx, xx, mrm|evex, x, 2},
{MOD_EXT, 0xf20f1010, "(mod ext 21)", xx, xx, xx, xx, xx, mrm|evex, x, 21},
},
/* prefix extension 1 */
{
{OP_movups, 0x0f1110, "movups", Wps, xx, Vps, xx, xx, mrm, x, END_LIST},
{OP_movss, 0xf30f1110, "movss", Wss, xx, Vss, xx, xx, mrm, x, END_LIST},
{OP_movupd, 0x660f1110, "movupd", Wpd, xx, Vpd, xx, xx, mrm, x, END_LIST},
{OP_movsd, 0xf20f1110, "movsd", Wsd, xx, Vsd, xx, xx, mrm, x, END_LIST},
{OP_vmovups, 0x0f1110, "vmovups", Wvs, xx, Vvs, xx, xx, mrm|vex, x, tevexw[0][0]},
{MOD_EXT, 0xf30f1110, "(mod ext 10)", xx, xx, xx, xx, xx, mrm|vex, x, 10},
{OP_vmovupd, 0x660f1110, "vmovupd", Wvd, xx, Vvd, xx, xx, mrm|vex, x, tevexw[2][1]},
{MOD_EXT, 0xf20f1110, "(mod ext 11)", xx, xx, xx, xx, xx, mrm|vex, x, 11},
{EVEX_W_EXT, 0x0f1110, "(evex_W ext 1)", xx, xx, xx, xx, xx, mrm|evex, x, 1},
{MOD_EXT, 0xf30f1110, "(mod ext 22)", xx, xx, xx, xx, xx, mrm|evex, x, 22},
{EVEX_W_EXT, 0x660f1110, "(evex_W ext 3)", xx, xx, xx, xx, xx, mrm|evex, x, 3},
{MOD_EXT, 0xf20f1110, "(mod ext 23)", xx, xx, xx, xx, xx, mrm|evex, x, 23},
},
/* prefix extension 2 */
{
    /* i#319: note that the reg-reg form of the load version (0f12) is legal
     * and has a separate mnemonic ("movhlps"), yet the reg-reg form of
     * the store version (0f13) is illegal
     */
{OP_movlps, 0x0f1210, "movlps", Vq_dq, xx, Wq_dq, xx, xx, mrm, x, tpe[3][0]}, /*"movhlps" if reg-reg */
{OP_movsldup, 0xf30f1210, "movsldup", Vps, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_movlpd, 0x660f1210, "movlpd", Vq_dq, xx, Mq, xx, xx, mrm, x, tpe[3][2]},
{OP_movddup, 0xf20f1210, "movddup", Vpd, xx, Wq_dq, xx, xx, mrm, x, END_LIST},
{OP_vmovlps, 0x0f1210, "vmovlps", Vq_dq, xx, Hq_dq, Wq_dq, xx, mrm|vex|reqL0, x, tpe[3][4]}, /*"vmovhlps" if reg-reg */
{OP_vmovsldup,0xf30f1210, "vmovsldup", Vvs, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{OP_vmovlpd, 0x660f1210, "vmovlpd", Vq_dq, xx, Hq_dq, Mq, xx, mrm|vex, x, tpe[3][6]},
{OP_vmovddup, 0xf20f1210, "vmovddup", Vvd, xx, Wh_x, xx, xx, mrm|vex, x, END_LIST},
{EVEX_W_EXT, 0x0f1210, "(evex_W ext 14)", xx, xx, xx, xx, xx, mrm|evex, x, 14},
{INVALID, 0xf30f1210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f1210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f1210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 3 */
{
{OP_movlps, 0x0f1310, "movlps", Mq, xx, Vq_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movlpd, 0x660f1310, "movlpd", Mq, xx, Vq_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovlps, 0x0f1310, "vmovlps", Mq, xx, Vq_dq, xx, xx, mrm|vex, x, tevexw[14][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovlpd, 0x660f1310, "vmovlpd", Mq, xx, Vq_dq, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f1310, "(evex_W ext 15)", xx, xx, xx, xx, xx, mrm|evex, x, 15},
{INVALID, 0xf30f1310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f1310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f1310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 4 */
{
{OP_unpcklps, 0x0f1410, "unpcklps", Vps, xx, Wq_dq, Vps, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_unpcklpd, 0x660f1410, "unpcklpd", Vpd, xx, Wq_dq, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vunpcklps, 0x0f1410, "vunpcklps", Vvs, xx, Hh_x, Wh_x, xx, mrm|vex, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vunpcklpd, 0x660f1410, "vunpcklpd", Vvd, xx, Hh_x, Wh_x, xx, mrm|vex, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f1410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f1410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f1410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f1410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 5 */
{
{OP_unpckhps, 0x0f1510, "unpckhps", Vps, xx, Wq_dq, Vps, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_unpckhpd, 0x660f1510, "unpckhpd", Vpd, xx, Wq_dq, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vunpckhps, 0x0f1510, "vunpckhps", Vvs, xx, Hh_x, Wh_x, xx, mrm|vex, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vunpckhpd, 0x660f1510, "vunpckhpd", Vvd, xx, Hh_x, Wh_x, xx, mrm|vex, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f1510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f1510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f1510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f1510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 6 */
{
    /* i#319: note that the reg-reg form of the load version (0f16) is legal
     * and has a separate mnemonic ("movlhps"), yet the reg-reg form of
     * the store version (0f17) is illegal
     */
{OP_movhps, 0x0f1610, "movhps", Vq_dq, xx, Wq_dq, xx, xx, mrm, x, tpe[7][0]}, /*"movlhps" if reg-reg */
{OP_movshdup, 0xf30f1610, "movshdup", Vps, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_movhpd, 0x660f1610, "movhpd", Vq_dq, xx, Mq, xx, xx, mrm, x, tpe[7][2]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovhps, 0x0f1610, "vmovhps", Vq_dq, xx, Hq_dq, Wq_dq, xx, mrm|vex|reqL0, x, tpe[7][4]}, /*"vmovlhps" if reg-reg */
{OP_vmovshdup, 0xf30f1610, "vmovshdup", Vvs, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{OP_vmovhpd, 0x660f1610, "vmovhpd", Vq_dq, xx, Hq_dq, Mq, xx, mrm|vex|reqL0, x, tpe[7][6]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f1610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f1610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f1610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f1610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 7 */
{
{OP_movhps, 0x0f1710, "movhps", Mq, xx, Vq_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movhpd, 0x660f1710, "movhpd", Mq, xx, Vq_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovhps, 0x0f1710, "vmovhps", Mq, xx, Vq_dq, xx, xx, mrm|vex|reqL0, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovhpd, 0x660f1710, "vmovhpd", Mq, xx, Vq_dq, xx, xx, mrm|vex|reqL0, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f1710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f1710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f1710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f1710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 8 */
{
{OP_movaps, 0x0f2810, "movaps", Vps, xx, Wps, xx, xx, mrm, x, tpe[9][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movapd, 0x660f2810, "movapd", Vpd, xx, Wpd, xx, xx, mrm, x, tpe[9][2]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovaps, 0x0f2810, "vmovaps", Vvs, xx, Wvs, xx, xx, mrm|vex, x, tpe[9][4]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovapd, 0x660f2810, "vmovapd", Vvd, xx, Wvd, xx, xx, mrm|vex, x, tpe[9][6]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f2810, "(evex_W ext 4)", xx, xx, xx, xx, xx, mrm|evex, x, 4},
{INVALID, 0xf30f2810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f2810, "(evex_W ext 6)", xx, xx, xx, xx, xx, mrm|evex, x, 6},
{INVALID, 0xf20f2810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 9 */
{
{OP_movaps, 0x0f2910, "movaps", Wps, xx, Vps, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movapd, 0x660f2910, "movapd", Wpd, xx, Vpd, xx, xx, mrm, x, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovaps, 0x0f2910, "vmovaps", Wvs, xx, Vvs, xx, xx, mrm|vex, x, tevexw[4][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovapd, 0x660f2910, "vmovapd", Wvd, xx, Vvd, xx, xx, mrm|vex, x, tevexw[6][1]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x0f2910, "(evex_W ext 5)", xx, xx, xx, xx, xx, mrm|evex, x, 5},
{INVALID, 0xf30f2910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0x660f2910, "(evex_W ext 7)", xx, xx, xx, xx, xx, mrm|evex, x, 7},
{INVALID, 0xf20f2910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 10 */
{
{OP_cvtpi2ps, 0x0f2a10, "cvtpi2ps", Vq_dq, xx, Qq, xx, xx, mrm, x, END_LIST},
{OP_cvtsi2ss, 0xf30f2a10, "cvtsi2ss", Vss, xx, Ed_q, xx, xx, mrm, x, END_LIST},
{OP_cvtpi2pd, 0x660f2a10, "cvtpi2pd", Vpd, xx, Qq, xx, xx, mrm, x, END_LIST},
{OP_cvtsi2sd, 0xf20f2a10, "cvtsi2sd", Vsd, xx, Ed_q, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f2a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtsi2ss, 0xf30f2a10, "vcvtsi2ss", Vss, xx, H12_dq, Ed_q, xx, mrm|vex, x, END_LIST},
{INVALID, 0x660f2a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtsi2sd, 0xf20f2a10, "vcvtsi2sd", Vsd, xx, Hsd, Ed_q, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f2a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f2a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f2a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f2a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 11 */
{
{OP_movntps, 0x0f2b10, "movntps", Mps, xx, Vps, xx, xx, mrm, x, END_LIST},
{OP_movntss, 0xf30f2b10, "movntss", Mss, xx, Vss, xx, xx, mrm, x, END_LIST},
{OP_movntpd, 0x660f2b10, "movntpd", Mpd, xx, Vpd, xx, xx, mrm, x, END_LIST},
{OP_movntsd, 0xf20f2b10, "movntsd", Msd, xx, Vsd, xx, xx, mrm, x, END_LIST},
{OP_vmovntps, 0x0f2b10, "vmovntps", Mvs, xx, Vvs, xx, xx, mrm|vex, x, END_LIST},
/* XXX: AMD doesn't list movntss in their new manual => assuming no vex version */
{INVALID, 0xf30f2b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovntpd, 0x660f2b10, "vmovntpd", Mvd, xx, Vvd, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f2b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f2b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f2b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f2b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f2b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 12 */
{
{OP_cvttps2pi, 0x0f2c10, "cvttps2pi", Pq, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_cvttss2si, 0xf30f2c10, "cvttss2si", Gd_q, xx, Wss, xx, xx, mrm, x, END_LIST},
{OP_cvttpd2pi, 0x660f2c10, "cvttpd2pi", Pq, xx, Wpd, xx, xx, mrm, x, END_LIST},
{OP_cvttsd2si, 0xf20f2c10, "cvttsd2si", Gd_q, xx, Wsd, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f2c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvttss2si, 0xf30f2c10, "vcvttss2si", Gd_q, xx, Wss, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0x660f2c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvttsd2si, 0xf20f2c10, "vcvttsd2si", Gd_q, xx, Wsd, xx, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f2c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f2c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f2c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f2c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 13 */
{
{OP_cvtps2pi, 0x0f2d10, "cvtps2pi", Pq, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_cvtss2si, 0xf30f2d10, "cvtss2si", Gd_q, xx, Wss, xx, xx, mrm, x, END_LIST},
{OP_cvtpd2pi, 0x660f2d10, "cvtpd2pi", Pq, xx, Wpd, xx, xx, mrm, x, END_LIST},
{OP_cvtsd2si, 0xf20f2d10, "cvtsd2si", Gd_q, xx, Wsd, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f2d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtss2si, 0xf30f2d10, "vcvtss2si", Gd_q, xx, Wss, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0x660f2d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtsd2si, 0xf20f2d10, "vcvtsd2si", Gd_q, xx, Wsd, xx, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f2d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f2d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f2d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f2d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 14 */
{
{OP_ucomiss, 0x0f2e10, "ucomiss", xx, xx, Vss, Wss, xx, mrm, fW6, END_LIST},
{INVALID, 0xf30f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_ucomisd, 0x660f2e10, "ucomisd", xx, xx, Vsd, Wsd, xx, mrm, fW6, END_LIST},
{INVALID, 0xf20f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vucomiss, 0x0f2e10, "vucomiss", xx, xx, Vss, Wss, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0xf30f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vucomisd, 0x660f2e10, "vucomisd", xx, xx, Vsd, Wsd, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0xf20f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f2e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 15 */
{
{OP_comiss, 0x0f2f10, "comiss", xx, xx, Vss, Wss, xx, mrm, fW6, END_LIST},
{INVALID, 0xf30f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_comisd, 0x660f2f10, "comisd", xx, xx, Vsd, Wsd, xx, mrm, fW6, END_LIST},
{INVALID, 0xf20f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcomiss, 0x0f2f10, "vcomiss", xx, xx, Vss, Wss, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0xf30f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vcomisd, 0x660f2f10, "vcomisd", xx, xx, Vsd, Wsd, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0xf20f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f2f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 16 */
{
{OP_movmskps, 0x0f5010, "movmskps", Gr, xx, Ups, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf30f5010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_movmskpd, 0x660f5010, "movmskpd", Gr, xx, Upd, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovmskps, 0x0f5010, "vmovmskps", Gr, xx, Uvs, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf30f5010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vmovmskpd, 0x660f5010, "vmovmskpd", Gr, xx, Uvd, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 17 */
{
{OP_sqrtps, 0x0f5110, "sqrtps", Vps, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_sqrtss, 0xf30f5110, "sqrtss", Vss, xx, Wss, xx, xx, mrm, x, END_LIST},
{OP_sqrtpd, 0x660f5110, "sqrtpd", Vpd, xx, Wpd, xx, xx, mrm, x, END_LIST},
{OP_sqrtsd, 0xf20f5110, "sqrtsd", Vsd, xx, Wsd, xx, xx, mrm, x, END_LIST},
{OP_vsqrtps, 0x0f5110, "vsqrtps", Vvs, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{OP_vsqrtss, 0xf30f5110, "vsqrtss", Vdq, xx, H12_dq, Wss, xx, mrm|vex, x, END_LIST},
{OP_vsqrtpd, 0x660f5110, "vsqrtpd", Vvd, xx, Wvd, xx, xx, mrm|vex, x, END_LIST},
{OP_vsqrtsd, 0xf20f5110, "vsqrtsd", Vdq, xx, Hsd, Wsd, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 18 */
{
{OP_rsqrtps, 0x0f5210, "rsqrtps", Vps, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_rsqrtss, 0xf30f5210, "rsqrtss", Vss, xx, Wss, xx, xx, mrm, x, END_LIST},
{INVALID, 0x660f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrsqrtps, 0x0f5210, "vrsqrtps", Vvs, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{OP_vrsqrtss, 0xf30f5210, "vrsqrtss", Vdq, xx, H12_dq, Wss, xx, mrm|vex, x, END_LIST},
{INVALID, 0x660f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 19 */
{
{OP_rcpps, 0x0f5310, "rcpps", Vps, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_rcpss, 0xf30f5310, "rcpss", Vss, xx, Wss, xx, xx, mrm, x, END_LIST},
{INVALID, 0x660f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrcpps, 0x0f5310, "vrcpps", Vvs, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{OP_vrcpss, 0xf30f5310, "vrcpss", Vdq, xx, H12_dq, Wss, xx, mrm|vex, x, END_LIST},
{INVALID, 0x660f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 20 */
{
{OP_andps, 0x0f5410, "andps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0xf30f5410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_andpd, 0x660f5410, "andpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vandps, 0x0f5410, "vandps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf30f5410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vandpd, 0x660f5410, "vandpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f5410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 21 */
{
{OP_andnps, 0x0f5510, "andnps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0xf30f5510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_andnpd, 0x660f5510, "andnpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vandnps, 0x0f5510, "vandnps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf30f5510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vandnpd, 0x660f5510, "vandnpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f5510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 22 */
{
{OP_orps, 0x0f5610, "orps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0xf30f5610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_orpd, 0x660f5610, "orpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorps, 0x0f5610, "vorps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf30f5610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vorpd, 0x660f5610, "vorpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f5610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 23 */
{
{OP_xorps, 0x0f5710, "xorps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0xf30f5710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_xorpd, 0x660f5710, "xorpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vxorps, 0x0f5710, "vxorps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf30f5710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vxorpd, 0x660f5710, "vxorpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f5710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 24 */
{
{OP_addps, 0x0f5810, "addps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_addss, 0xf30f5810, "addss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_addpd, 0x660f5810, "addpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_addsd, 0xf20f5810, "addsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vaddps, 0x0f5810, "vaddps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
{OP_vaddss, 0xf30f5810, "vaddss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, END_LIST},
{OP_vaddpd, 0x660f5810, "vaddpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vaddsd, 0xf20f5810, "vaddsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 25 */
{
{OP_mulps, 0x0f5910, "mulps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_mulss, 0xf30f5910, "mulss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_mulpd, 0x660f5910, "mulpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_mulsd, 0xf20f5910, "mulsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vmulps, 0x0f5910, "vmulps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
{OP_vmulss, 0xf30f5910, "vmulss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, END_LIST},
{OP_vmulpd, 0x660f5910, "vmulpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vmulsd, 0xf20f5910, "vmulsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, END_LIST},
},
/* prefix extension 26 */
{
{OP_cvtps2pd, 0x0f5a10, "cvtps2pd", Vpd, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_cvtss2sd, 0xf30f5a10, "cvtss2sd", Vsd, xx, Wss, xx, xx, mrm, x, END_LIST},
{OP_cvtpd2ps, 0x660f5a10, "cvtpd2ps", Vps, xx, Wpd, xx, xx, mrm, x, END_LIST},
{OP_cvtsd2ss, 0xf20f5a10, "cvtsd2ss", Vss, xx, Wsd, xx, xx, mrm, x, END_LIST},
{OP_vcvtps2pd, 0x0f5a10, "vcvtps2pd", Vvd, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{OP_vcvtss2sd, 0xf30f5a10, "vcvtss2sd", Vsd, xx, Hsd, Wss, xx, mrm|vex, x, END_LIST},
{OP_vcvtpd2ps, 0x660f5a10, "vcvtpd2ps", Vvs, xx, Wvd, xx, xx, mrm|vex, x, END_LIST},
{OP_vcvtsd2ss, 0xf20f5a10, "vcvtsd2ss", Vss, xx, H12_dq, Wsd, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 27 */
{
{OP_cvtdq2ps, 0x0f5b10, "cvtdq2ps", Vps, xx, Wdq, xx, xx, mrm, x, END_LIST},
{OP_cvttps2dq, 0xf30f5b10, "cvttps2dq", Vdq, xx, Wps, xx, xx, mrm, x, END_LIST},
{OP_cvtps2dq, 0x660f5b10, "cvtps2dq", Vdq, xx, Wps, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20f5b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtdq2ps, 0x0f5b10, "vcvtdq2ps", Vvs, xx, Wx, xx, xx, mrm|vex, x, END_LIST},
{OP_vcvttps2dq, 0xf30f5b10, "vcvttps2dq", Vx, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{OP_vcvtps2dq, 0x660f5b10, "vcvtps2dq", Vx, xx, Wvs, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f5b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 28 */
{
{OP_subps, 0x0f5c10, "subps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_subss, 0xf30f5c10, "subss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_subpd, 0x660f5c10, "subpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_subsd, 0xf20f5c10, "subsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vsubps, 0x0f5c10, "vsubps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
{OP_vsubss, 0xf30f5c10, "vsubss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, END_LIST},
{OP_vsubpd, 0x660f5c10, "vsubpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vsubsd, 0xf20f5c10, "vsubsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 29 */
{
{OP_minps, 0x0f5d10, "minps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_minss, 0xf30f5d10, "minss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_minpd, 0x660f5d10, "minpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_minsd, 0xf20f5d10, "minsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vminps, 0x0f5d10, "vminps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
{OP_vminss, 0xf30f5d10, "vminss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, END_LIST},
{OP_vminpd, 0x660f5d10, "vminpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vminsd, 0xf20f5d10, "vminsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 30 */
{
{OP_divps, 0x0f5e10, "divps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_divss, 0xf30f5e10, "divss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_divpd, 0x660f5e10, "divpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_divsd, 0xf20f5e10, "divsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vdivps, 0x0f5e10, "vdivps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
{OP_vdivss, 0xf30f5e10, "vdivss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, END_LIST},
{OP_vdivpd, 0x660f5e10, "vdivpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vdivsd, 0xf20f5e10, "vdivsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 31 */
{
{OP_maxps, 0x0f5f10, "maxps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{OP_maxss, 0xf30f5f10, "maxss", Vss, xx, Wss, Vss, xx, mrm, x, END_LIST},
{OP_maxpd, 0x660f5f10, "maxpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_maxsd, 0xf20f5f10, "maxsd", Vsd, xx, Wsd, Vsd, xx, mrm, x, END_LIST},
{OP_vmaxps, 0x0f5f10, "vmaxps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
{OP_vmaxss, 0xf30f5f10, "vmaxss", Vdq, xx, Hdq, Wss, xx, mrm|vex, x, END_LIST},
{OP_vmaxpd, 0x660f5f10, "vmaxpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vmaxsd, 0xf20f5f10, "vmaxsd", Vdq, xx, Hdq, Wsd, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f5f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f5f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f5f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f5f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 32 */
{
{OP_punpcklbw, 0x0f6010, "punpcklbw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[32][2]},
{INVALID, 0xf30f6010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpcklbw, 0x660f6010, "punpcklbw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpcklbw, 0x660f6010, "vpunpcklbw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 33 */
{
{OP_punpcklwd, 0x0f6110, "punpcklwd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[33][2]},
{INVALID, 0xf30f6110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpcklwd, 0x660f6110, "punpcklwd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpcklwd, 0x660f6110, "vpunpcklwd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 34 */
{
{OP_punpckldq, 0x0f6210, "punpckldq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[34][2]},
{INVALID, 0xf30f6210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpckldq, 0x660f6210, "punpckldq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpckldq, 0x660f6210, "vpunpckldq", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 35 */
{
{OP_packsswb, 0x0f6310, "packsswb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[35][2]},
{INVALID, 0xf30f6310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_packsswb, 0x660f6310, "packsswb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpacksswb, 0x660f6310, "vpacksswb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 36 */
{
{OP_pcmpgtb, 0x0f6410, "pcmpgtb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[36][2]},
{INVALID, 0xf30f6410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpgtb, 0x660f6410, "pcmpgtb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpgtb, 0x660f6410, "vpcmpgtb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 37 */
{
{OP_pcmpgtw, 0x0f6510, "pcmpgtw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[37][2]},
{INVALID, 0xf30f6510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpgtw, 0x660f6510, "pcmpgtw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpgtw, 0x660f6510, "vpcmpgtw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 38 */
{
{OP_pcmpgtd, 0x0f6610, "pcmpgtd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[38][2]},
{INVALID, 0xf30f6610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpgtd, 0x660f6610, "pcmpgtd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpgtd, 0x660f6610, "vpcmpgtd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 39 */
{
{OP_packuswb, 0x0f6710, "packuswb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[39][2]},
{INVALID, 0xf30f6710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_packuswb, 0x660f6710, "packuswb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpackuswb, 0x660f6710, "vpackuswb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 40 */
{
{OP_punpckhbw, 0x0f6810, "punpckhbw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[40][2]},
{INVALID, 0xf30f6810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpckhbw, 0x660f6810, "punpckhbw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpckhbw, 0x660f6810, "vpunpckhbw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 41 */
{
{OP_punpckhwd, 0x0f6910, "punpckhwd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[41][2]},
{INVALID, 0xf30f6910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpckhwd, 0x660f6910, "punpckhwd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpckhwd, 0x660f6910, "vpunpckhwd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 42 */
{
{OP_punpckhdq, 0x0f6a10, "punpckhdq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[42][2]},
{INVALID, 0xf30f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpckhdq, 0x660f6a10, "punpckhdq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpckhdq, 0x660f6a10, "vpunpckhdq", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 43 */
{
{OP_packssdw, 0x0f6b10, "packssdw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[43][2]},
{INVALID, 0xf30f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_packssdw, 0x660f6b10, "packssdw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpackssdw, 0x660f6b10, "vpackssdw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 44 */
{
{INVALID, 0x0f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpcklqdq, 0x660f6c10, "punpcklqdq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpcklqdq, 0x660f6c10, "vpunpcklqdq", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 45 */
{
{INVALID, 0x0f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_punpckhqdq, 0x660f6d10, "punpckhqdq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpunpckhqdq, 0x660f6d10, "vpunpckhqdq", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 46 */
{
/* movd zeroes the top bits when the destination is an mmx or xmm reg */
{OP_movd, 0x0f6e10, "movd", Pq, xx, Ed_q, xx, xx, mrm, x, tpe[46][2]},
{INVALID, 0xf30f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movd, 0x660f6e10, "movd", Vdq, xx, Ed_q, xx, xx, mrm, x, tpe[51][0]},
{INVALID, 0xf20f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovd, 0x660f6e10, "vmovd", Vdq, xx, Ed_q, xx, xx, mrm|vex, x, tpe[51][6]},
{INVALID, 0xf20f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f6e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 47: all assumed to have Ib */
{
{OP_pshufw, 0x0f7010, "pshufw", Pq, xx, Qq, Ib, xx, mrm, x, END_LIST},
{OP_pshufhw, 0xf30f7010, "pshufhw", Vdq, xx, Wdq, Ib, xx, mrm, x, END_LIST},
{OP_pshufd, 0x660f7010, "pshufd", Vdq, xx, Wdq, Ib, xx, mrm, x, END_LIST},
{OP_pshuflw, 0xf20f7010, "pshuflw", Vdq, xx, Wdq, Ib, xx, mrm, x, END_LIST},
{INVALID, 0x0f7010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpshufhw, 0xf30f7010, "vpshufhw", Vx, xx, Wx, Ib, xx, mrm|vex, x, END_LIST},
{OP_vpshufd, 0x660f7010, "vpshufd", Vx, xx, Wx, Ib, xx, mrm|vex, x, END_LIST},
{OP_vpshuflw, 0xf20f7010, "vpshuflw", Vx, xx, Wx, Ib, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 48 */
{
{OP_pcmpeqb, 0x0f7410, "pcmpeqb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[48][2]},
{INVALID, 0xf30f7410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpeqb, 0x660f7410, "pcmpeqb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f7410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f7410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpeqb, 0x660f7410, "vpcmpeqb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 49 */
{
{OP_pcmpeqw, 0x0f7510, "pcmpeqw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[49][2]},
{INVALID, 0xf30f7510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpeqw, 0x660f7510, "pcmpeqw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f7510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f7510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpeqw, 0x660f7510, "vpcmpeqw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 50 */
{
{OP_pcmpeqd, 0x0f7610, "pcmpeqd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[50][2]},
{INVALID, 0xf30f7610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pcmpeqd, 0x660f7610, "pcmpeqd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0f7610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30f7610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpcmpeqd, 0x660f7610, "vpcmpeqd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 51 */
{
{OP_movd, 0x0f7e10, "movd", Ed_q, xx, Pd_q, xx, xx, mrm, x, tpe[51][2]},
/* movq zeroes the top bits when the destination is an mmx or xmm reg */
{OP_movq, 0xf30f7e10, "movq", Vdq, xx, Wq_dq, xx, xx, mrm, x, tpe[61][2]},
{OP_movd, 0x660f7e10, "movd", Ed_q, xx, Vd_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovq, 0xf30f7e10, "vmovq", Vdq, xx, Wq_dq, xx, xx, mrm|vex, x, tpe[61][6]},
{OP_vmovd, 0x660f7e10, "vmovd", Ed_q, xx, Vd_dq, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 52: all assumed to have Ib */
{
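/* The Ib selects the comparison predicate (eq, lt, le, unord, etc.; the vex forms allow 32 predicates). */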
{OP_cmpps, 0x0fc210, "cmpps", Vps, xx, Wps, Ib, Vps, mrm, x, END_LIST},
{OP_cmpss, 0xf30fc210, "cmpss", Vss, xx, Wss, Ib, Vss, mrm, x, END_LIST},
{OP_cmppd, 0x660fc210, "cmppd", Vpd, xx, Wpd, Ib, Vpd, mrm, x, END_LIST},
{OP_cmpsd, 0xf20fc210, "cmpsd", Vsd, xx, Wsd, Ib, Vsd, mrm, x, END_LIST},
{OP_vcmpps, 0x0fc210, "vcmpps", Vvs, xx, Hvs, Wvs, Ib, mrm|vex, x, END_LIST},
{OP_vcmpss, 0xf30fc210, "vcmpss", Vdq, xx, Hdq, Wss, Ib, mrm|vex, x, END_LIST},
{OP_vcmppd, 0x660fc210, "vcmppd", Vvd, xx, Hvd, Wvd, Ib, mrm|vex, x, END_LIST},
{OP_vcmpsd, 0xf20fc210, "vcmpsd", Vdq, xx, Hdq, Wsd, Ib, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fc210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fc210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fc210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fc210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 53: all assumed to have Ib */
{ /* note that gnu tools print immed first: pinsrw $0x0,(%esp),%xmm0 */
/* FIXME i#1388: pinsrw actually reads only bottom word of reg */
{OP_pinsrw, 0x0fc410, "pinsrw", Pw_q, xx, Rd_Mw, Ib, xx, mrm, x, tpe[53][2]},
{INVALID, 0xf30fc410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pinsrw, 0x660fc410, "pinsrw", Vw_dq, xx, Rd_Mw, Ib, xx, mrm, x, END_LIST},
{INVALID, 0xf20fc410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fc410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fc410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpinsrw, 0x660fc410, "vpinsrw", Vdq, xx, H14_dq, Rd_Mw, Ib, mrm|vex, x, END_LIST},
{INVALID, 0xf20fc410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fc410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fc410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fc410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fc410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 54: all assumed to have Ib */
{ /* note that gnu tools print immed first: pextrw $0x7,%xmm7,%edx */
{OP_pextrw, 0x0fc510, "pextrw", Gd, xx, Nw_q, Ib, xx, mrm, x, tpe[54][2]},
{INVALID, 0xf30fc510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pextrw, 0x660fc510, "pextrw", Gd, xx, Uw_dq, Ib, xx, mrm, x, tvex[37][0]},
{INVALID, 0xf20fc510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fc510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fc510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpextrw, 0x660fc510, "vpextrw", Gd, xx, Uw_dq, Ib, xx, mrm|vex, x, tvex[37][1]},
{INVALID, 0xf20fc510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fc510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fc510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fc510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fc510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 55: all assumed to have Ib */
{
{OP_shufps, 0x0fc610, "shufps", Vps, xx, Wps, Ib, Vps, mrm, x, END_LIST},
{INVALID, 0xf30fc610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_shufpd, 0x660fc610, "shufpd", Vpd, xx, Wpd, Ib, Vpd, mrm, x, END_LIST},
{INVALID, 0xf20fc610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vshufps, 0x0fc610, "vshufps", Vvs, xx, Hvs, Wvs, Ib, mrm|vex, x, END_LIST},
{INVALID, 0xf30fc610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vshufpd, 0x660fc610, "vshufpd", Vvd, xx, Hvd, Wvd, Ib, mrm|vex, x, END_LIST},
{INVALID, 0xf20fc610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fc610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fc610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fc610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fc610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 56 */
{
{OP_psrlw, 0x0fd110, "psrlw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[56][2]},
{INVALID, 0xf30fd110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psrlw, 0x660fd110, "psrlw", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[104][0]},
{INVALID, 0xf20fd110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsrlw, 0x660fd110, "vpsrlw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[104][6]},
{INVALID, 0xf20fd110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 57 */
{
{OP_psrld, 0x0fd210, "psrld", Pq, xx, Qq, Pq, xx, mrm, x, tpe[57][2]},
{INVALID, 0xf30fd210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psrld, 0x660fd210, "psrld", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[107][0]},
{INVALID, 0xf20fd210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsrld, 0x660fd210, "vpsrld", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[107][6]},
{INVALID, 0xf20fd210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 58 */
{
{OP_psrlq, 0x0fd310, "psrlq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[58][2]},
{INVALID, 0xf30fd310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psrlq, 0x660fd310, "psrlq", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[110][0]},
{INVALID, 0xf20fd310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsrlq, 0x660fd310, "vpsrlq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[110][6]},
{INVALID, 0xf20fd310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 59 */
{
{OP_paddq, 0x0fd410, "paddq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[59][2]},
{INVALID, 0xf30fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_paddq, 0x660fd410, "paddq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fd410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddq, 0x660fd410, "vpaddq", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 60 */
{
{OP_pmullw, 0x0fd510, "pmullw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[60][2]},
{INVALID, 0xf30fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pmullw, 0x660fd510, "pmullw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fd510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmullw, 0x660fd510, "vpmullw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 61 */
{
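/* movq2dq copies an mmx reg into the low quadword of an xmm reg (zeroing the rest);
 * movdq2q copies the low quadword of an xmm reg into an mmx reg.
 * Both are register-only forms, hence the Nq/Uq operands.
 */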
{INVALID, 0x0fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_movq2dq, 0xf30fd610, "movq2dq", Vdq, xx, Nq, xx, xx, mrm, x, END_LIST},
{OP_movq, 0x660fd610, "movq", Wq_dq, xx, Vq_dq, xx, xx, mrm, x, END_LIST},
{OP_movdq2q, 0xf20fd610, "movdq2q", Pq, xx, Uq_dq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmovq, 0x660fd610, "vmovq", Wq_dq, xx, Vq_dq, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 62 */
{
{OP_pmovmskb, 0x0fd710, "pmovmskb", Gd, xx, Nq, xx, xx, mrm, x, tpe[62][2]},
{INVALID, 0xf30fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pmovmskb, 0x660fd710, "pmovmskb", Gd, xx, Udq, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmovmskb, 0x660fd710, "vpmovmskb", Gd, xx, Ux, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 63 */
{
{OP_psubusb, 0x0fd810, "psubusb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[63][2]},
{INVALID, 0xf30fd810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubusb, 0x660fd810, "psubusb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fd810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubusb, 0x660fd810, "vpsubusb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fd810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 64 */
{
{OP_psubusw, 0x0fd910, "psubusw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[64][2]},
{INVALID, 0xf30fd910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubusw, 0x660fd910, "psubusw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fd910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fd910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fd910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubusw, 0x660fd910, "vpsubusw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fd910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 65 */
{
{OP_pminub, 0x0fda10, "pminub", Pq, xx, Qq, Pq, xx, mrm, x, tpe[65][2]},
{INVALID, 0xf30fda10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pminub, 0x660fda10, "pminub", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fda10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fda10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fda10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpminub, 0x660fda10, "vpminub", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fda10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fda10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fda10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fda10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fda10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 66 */
{
{OP_pand, 0x0fdb10, "pand", Pq, xx, Qq, Pq, xx, mrm, x, tpe[66][2]},
{INVALID, 0xf30fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pand, 0x660fdb10, "pand", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpand, 0x660fdb10, "vpand", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fdb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 67 */
{
{OP_paddusb, 0x0fdc10, "paddusb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[67][2]},
{INVALID, 0xf30fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddusb, 0x660fdc10, "paddusb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddusb, 0x660fdc10, "vpaddusb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fdc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 68 */
{
{OP_paddusw, 0x0fdd10, "paddusw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[68][2]},
{INVALID, 0xf30fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddusw, 0x660fdd10, "paddusw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddusw, 0x660fdd10, "vpaddusw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fdd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 69 */
{
{OP_pmaxub, 0x0fde10, "pmaxub", Pq, xx, Qq, Pq, xx, mrm, x, tpe[69][2]},
{INVALID, 0xf30fde10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmaxub, 0x660fde10, "pmaxub", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fde10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fde10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fde10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmaxub, 0x660fde10, "vpmaxub", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fde10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fde10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fde10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fde10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fde10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 70 */
{
{OP_pandn, 0x0fdf10, "pandn", Pq, xx, Qq, Pq, xx, mrm, x, tpe[70][2]},
{INVALID, 0xf30fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pandn, 0x660fdf10, "pandn", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpandn, 0x660fdf10, "vpandn", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fdf10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 71 */
{
{OP_pavgb, 0x0fe010, "pavgb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[71][2]},
{INVALID, 0xf30fe010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pavgb, 0x660fe010, "pavgb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpavgb, 0x660fe010, "vpavgb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fe010, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fe010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fe010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fe010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 72 */
{
{OP_psraw, 0x0fe110, "psraw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[72][2]},
{INVALID, 0xf30fe110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psraw, 0x660fe110, "psraw", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[105][0]},
{INVALID, 0xf20fe110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsraw, 0x660fe110, "vpsraw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[105][6]},
{INVALID, 0xf20fe110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 73 */
{
{OP_psrad, 0x0fe210, "psrad", Pq, xx, Qq, Pq, xx, mrm, x, tpe[73][2]},
{INVALID, 0xf30fe210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psrad, 0x660fe210, "psrad", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[108][0]},
{INVALID, 0xf20fe210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsrad, 0x660fe210, "vpsrad", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[108][6]},
{INVALID, 0xf20fe210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fe210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fe210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fe210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 74 */
{
{OP_pavgw, 0x0fe310, "pavgw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[74][2]},
{INVALID, 0xf30fe310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pavgw, 0x660fe310, "pavgw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpavgw, 0x660fe310, "vpavgw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fe310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fe310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fe310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fe310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 75 */
{
{OP_pmulhuw, 0x0fe410, "pmulhuw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[75][2]},
{INVALID, 0xf30fe410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmulhuw, 0x660fe410, "pmulhuw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmulhuw, 0x660fe410, "vpmulhuw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fe410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 76 */
{
{OP_pmulhw, 0x0fe510, "pmulhw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[76][2]},
{INVALID, 0xf30fe510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmulhw, 0x660fe510, "pmulhw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmulhw, 0x660fe510, "vpmulhw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fe510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 77 */
{
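/* cvttpd2dq truncates toward zero while cvtpd2dq rounds per MXCSR;
 * cvtdq2pd widens two dwords, so it reads only the low quadword of its source.
 */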
{INVALID, 0x0fe610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_cvtdq2pd, 0xf30fe610, "cvtdq2pd", Vpd, xx, Wq_dq, xx, xx, mrm, x, END_LIST},
{OP_cvttpd2dq, 0x660fe610, "cvttpd2dq", Vdq, xx, Wpd, xx, xx, mrm, x, END_LIST},
{OP_cvtpd2dq, 0xf20fe610, "cvtpd2dq", Vdq, xx, Wpd, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0fe610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vcvtdq2pd, 0xf30fe610, "vcvtdq2pd", Vvd, xx, Wvq_dq, xx, xx, mrm|vex, x, END_LIST},
{OP_vcvttpd2dq, 0x660fe610, "vcvttpd2dq", Vx, xx, Wvd, xx, xx, mrm|vex, x, END_LIST},
{OP_vcvtpd2dq, 0xf20fe610, "vcvtpd2dq", Vx, xx, Wvd, xx, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fe610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fe610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fe610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 78 */
{
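/* movntq/movntdq are non-temporal stores (memory destination only). */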
{OP_movntq, 0x0fe710, "movntq", Mq, xx, Pq, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf30fe710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_movntdq, 0x660fe710, "movntdq", Mdq, xx, Vdq, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vmovntdq, 0x660fe710, "vmovntdq", Mx, xx, Vx, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fe710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 79 */
{
{OP_psubsb, 0x0fe810, "psubsb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[79][2]},
{INVALID, 0xf30fe810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubsb, 0x660fe810, "psubsb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubsb, 0x660fe810, "vpsubsb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fe810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fe810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fe810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fe810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 80 */
{
{OP_psubsw, 0x0fe910, "psubsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[80][2]},
{INVALID, 0xf30fe910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubsw, 0x660fe910, "psubsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fe910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fe910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fe910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubsw, 0x660fe910, "vpsubsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fe910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fe910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fe910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fe910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fe910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 81 */
{
{OP_pminsw, 0x0fea10, "pminsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[81][2]},
{INVALID, 0xf30fea10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pminsw, 0x660fea10, "pminsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fea10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fea10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fea10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpminsw, 0x660fea10, "vpminsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fea10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fea10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fea10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fea10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fea10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 82 */
{
{OP_por, 0x0feb10, "por", Pq, xx, Qq, Pq, xx, mrm, x, tpe[82][2]},
{INVALID, 0xf30feb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_por, 0x660feb10, "por", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20feb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0feb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30feb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpor, 0x660feb10, "vpor", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20feb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0feb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30feb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660feb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20feb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 83 */
{
{OP_paddsb, 0x0fec10, "paddsb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[83][2]},
{INVALID, 0xf30fec10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddsb, 0x660fec10, "paddsb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fec10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fec10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fec10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddsb, 0x660fec10, "vpaddsb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fec10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fec10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fec10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fec10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fec10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 84 */
{
{OP_paddsw, 0x0fed10, "paddsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[84][2]},
{INVALID, 0xf30fed10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddsw, 0x660fed10, "paddsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fed10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fed10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fed10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddsw, 0x660fed10, "vpaddsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fed10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fed10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fed10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fed10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fed10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 85 */
{
{OP_pmaxsw, 0x0fee10, "pmaxsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[85][2]},
{INVALID, 0xf30fee10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmaxsw, 0x660fee10, "pmaxsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fee10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fee10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fee10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmaxsw, 0x660fee10, "vpmaxsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fee10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fee10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fee10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fee10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fee10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 86 */
{
{OP_pxor, 0x0fef10, "pxor", Pq, xx, Qq, Pq, xx, mrm, x, tpe[86][2]},
{INVALID, 0xf30fef10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pxor, 0x660fef10, "pxor", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20fef10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0fef10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30fef10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpxor, 0x660fef10, "vpxor", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20fef10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 87 */
{
{OP_psllw, 0x0ff110, "psllw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[87][2]},
{INVALID, 0xf30ff110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psllw, 0x660ff110, "psllw", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[106][0]},
{INVALID, 0xf20ff110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsllw, 0x660ff110, "vpsllw", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[106][6]},
{INVALID, 0xf20ff110, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 88 */
{
{OP_pslld, 0x0ff210, "pslld", Pq, xx, Qq, Pq, xx, mrm, x, tpe[88][2]},
{INVALID, 0xf30ff210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pslld, 0x660ff210, "pslld", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[109][0]},
{INVALID, 0xf20ff210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpslld, 0x660ff210, "vpslld", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[109][6]},
{INVALID, 0xf20ff210, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 89 */
{
{OP_psllq, 0x0ff310, "psllq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[89][2]},
{INVALID, 0xf30ff310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psllq, 0x660ff310, "psllq", Vdq, xx, Wdq, Vdq, xx, mrm, x, tpe[111][0]},
{INVALID, 0xf20ff310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsllq, 0x660ff310, "vpsllq", Vx, xx, Hx, Wx, xx, mrm|vex, x, tpe[111][6]},
{INVALID, 0xf20ff310, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 90 */
{
{OP_pmuludq, 0x0ff410, "pmuludq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[90][2]},
{INVALID, 0xf30ff410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmuludq, 0x660ff410, "pmuludq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ff410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmuludq, 0x660ff410, "vpmuludq", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20ff410, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 91 */
{
{OP_pmaddwd, 0x0ff510, "pmaddwd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[91][2]},
{INVALID, 0xf30ff510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_pmaddwd, 0x660ff510, "pmaddwd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ff510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpmaddwd, 0x660ff510, "vpmaddwd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20ff510, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 92 */
{
{OP_psadbw, 0x0ff610, "psadbw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[92][2]},
{INVALID, 0xf30ff610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psadbw, 0x660ff610, "psadbw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ff610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsadbw, 0x660ff610, "vpsadbw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20ff610, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 93 */
{
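/* These store only the bytes whose mask sign bits are set, to the implicit
 * [ds:xdi] destination, which is presumably why they carry predcx.
 */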
{OP_maskmovq, 0x0ff710, "maskmovq", Bq, xx, Pq, Nq, xx, mrm|predcx, x, END_LIST}, /* Intel table says "Ppi, Qpi" */
{INVALID, 0xf30ff710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_maskmovdqu, 0x660ff710, "maskmovdqu", Bdq, xx, Vdq, Udq, xx, mrm|predcx, x, END_LIST},
{INVALID, 0xf20ff710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vmaskmovdqu, 0x660ff710, "vmaskmovdqu", Bdq, xx, Vdq, Udq, xx, mrm|vex|reqL0|predcx, x, END_LIST},
{INVALID, 0xf20ff710, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 94 */
{
{OP_psubb, 0x0ff810, "psubb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[94][2]},
{INVALID, 0xf30ff810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubb, 0x660ff810, "psubb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ff810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubb, 0x660ff810, "vpsubb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20ff810, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 95 */
{
{OP_psubw, 0x0ff910, "psubw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[95][2]},
{INVALID, 0xf30ff910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubw, 0x660ff910, "psubw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ff910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ff910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ff910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubw, 0x660ff910, "vpsubw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20ff910, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 96 */
{
{OP_psubd, 0x0ffa10, "psubd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[96][2]},
{INVALID, 0xf30ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_psubd, 0x660ffa10, "psubd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpsubd, 0x660ffa10, "vpsubd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ffa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 97 */
{
{OP_psubq, 0x0ffb10, "psubq", Pq, xx, Qq, Pq, xx, mrm, x, tpe[97][2]},
{INVALID, 0xf30ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psubq, 0x660ffb10, "psubq", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsubq, 0x660ffb10, "vpsubq", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 98 */
{
{OP_paddb, 0x0ffc10, "paddb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[98][2]},
{INVALID, 0xf30ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddb, 0x660ffc10, "paddb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddb, 0x660ffc10, "vpaddb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ffc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 99 */
{
{OP_paddw, 0x0ffd10, "paddw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[99][2]},
{INVALID, 0xf30ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddw, 0x660ffd10, "paddw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddw, 0x660ffd10, "vpaddw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ffd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 100 */
{
{OP_paddd, 0x0ffe10, "paddd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[100][2]},
{INVALID, 0xf30ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_paddd, 0x660ffe10, "paddd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf20ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x0ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf30ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vpaddd, 0x660ffe10, "vpaddd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ffe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 101: all assumed to have Ib */
{
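/* Unlike psrlq/psllq, psrldq/pslldq shift the whole xmm register by a byte count given in the immediate. */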
{INVALID, 0x0f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psrldq, 0x660f7333, "psrldq", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrldq, 0x660f7333, "vpsrldq", Hx, xx, Ib, Ux, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7333, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 102: all assumed to have Ib */
{
{INVALID, 0x0f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pslldq, 0x660f7337, "pslldq", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpslldq, 0x660f7337, "vpslldq", Hx, xx, Ib, Ux, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7337, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 103 */
{
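/* In 64-bit mode 0x90 with rex.b encodes xchg %r8,%rax rather than nop,
 * hence the rex.b extension for the no-prefix form.
 */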
{REX_B_EXT, 0x900000, "(rex.b ext 0)", xx, xx, xx, xx, xx, no, x, 0},
{OP_pause, 0xf3900000, "pause", xx, xx, xx, xx, xx, no, x, END_LIST},
/* we chain these nop variants even though the encoder won't find them */
{OP_nop, 0x66900000, "nop", xx, xx, xx, xx, xx, no, x, tpe[103][3]},
/* windbg displays as "repne nop" */
{OP_nop, 0xf2900000, "nop", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0x900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 104: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psrlw, 0x0f7132, "psrlw", Nq, xx, Ib, Nq, xx, mrm, x, tpe[104][2]},
{INVALID, 0xf30f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psrlw, 0x660f7132, "psrlw", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrlw, 0x660f7132, "vpsrlw", Hx, xx, Ib, Ux, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7132, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 105: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psraw, 0x0f7134, "psraw", Nq, xx, Ib, Nq, xx, mrm, x, tpe[105][2]},
{INVALID, 0xf30f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psraw, 0x660f7134, "psraw", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsraw, 0x660f7134, "vpsraw", Hx, xx, Ib, Ux, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7134, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 106: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psllw, 0x0f7136, "psllw", Nq, xx, Ib, Nq, xx, mrm, x, tpe[106][2]},
{INVALID, 0xf30f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psllw, 0x660f7136, "psllw", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsllw, 0x660f7136, "vpsllw", Hx, xx, Ib, Ux, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7136, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 107: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psrld, 0x0f7232, "psrld", Nq, xx, Ib, Nq, xx, mrm, x, tpe[107][2]},
{INVALID, 0xf30f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psrld, 0x660f7232, "psrld", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrld, 0x660f7232, "vpsrld", Hx, xx, Ib, Ux, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7232, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 108: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psrad, 0x0f7234, "psrad", Nq, xx, Ib, Nq, xx, mrm, x, tpe[108][2]},
{INVALID, 0xf30f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psrad, 0x660f7234, "psrad", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrad, 0x660f7234, "vpsrad", Hx, xx, Ib, Ux, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7234, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 109: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_pslld, 0x0f7236, "pslld", Nq, xx, Ib, Nq, xx, mrm, x, tpe[109][2]},
{INVALID, 0xf30f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pslld, 0x660f7236, "pslld", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpslld, 0x660f7236, "vpslld", Hx, xx, Ib, Ux, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7236, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 110: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psrlq, 0x0f7332, "psrlq", Nq, xx, Ib, Nq, xx, mrm, x, tpe[110][2]},
{INVALID, 0xf30f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psrlq, 0x660f7332, "psrlq", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsrlq, 0x660f7332, "vpsrlq", Hx, xx, Ib, Ux, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7332, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 111: all assumed to have Ib */
{
/* Intel tables imply they may add opcodes in the mod<3 (mem) space in future */
{OP_psllq, 0x0f7336, "psllq", Nq, xx, Ib, Nq, xx, mrm, x, tpe[111][2]},
{INVALID, 0xf30f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psllq, 0x660f7336, "psllq", Udq, xx, Ib, Udq, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsllq, 0x660f7336, "vpsllq", Hx, xx, Ib, Ux, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7336, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 112 */
{
{OP_movq, 0x0f6f10, "movq", Pq, xx, Qq, xx, xx, mrm, x, tpe[113][0]},
{OP_movdqu, 0xf30f6f10, "movdqu", Vdq, xx, Wdq, xx, xx, mrm, x, tpe[113][1]},
{OP_movdqa, 0x660f6f10, "movdqa", Vdq, xx, Wdq, xx, xx, mrm, x, tpe[113][2]},
{INVALID, 0xf20f6f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f6f10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vmovdqu, 0xf30f6f10, "vmovdqu", Vx, xx, Wx, xx, xx, mrm|vex, x, tpe[113][5]},
{OP_vmovdqa, 0x660f6f10, "vmovdqa", Vx, xx, Wx, xx, xx, mrm|vex, x, tpe[113][6]},
{INVALID, 0xf20f6f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f6f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
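/* The evex forms pick the element size via EVEX.W: 0x66 gives vmovdqa32/64,
 * 0xf3 vmovdqu32/64, and 0xf2 vmovdqu8/16; the 0x0f 0x7f store forms in the
 * next group split the same way.
 */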
{EVEX_W_EXT, 0xf30f6f10, "(evex_W ext 11)", xx, xx, xx, xx, xx, mrm|evex, x, 11},
{EVEX_W_EXT, 0x660f6f10, "(evex_W ext 8)", xx, xx, xx, xx, xx, mrm|evex, x, 8},
{EVEX_W_EXT, 0xf20f6f10, "(evex_W ext 10)", xx, xx, xx, xx, xx, mrm|evex, x, 10},
},
/* prefix extension 113 */
{
{OP_movq, 0x0f7f10, "movq", Qq, xx, Pq, xx, xx, mrm, x, tpe[51][1]},
{OP_movdqu, 0xf30f7f10, "movdqu", Wdq, xx, Vdq, xx, xx, mrm, x, END_LIST},
{OP_movdqa, 0x660f7f10, "movdqa", Wdq, xx, Vdq, xx, xx, mrm, x, END_LIST},
{INVALID, 0xf20f7f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7f10, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vmovdqu, 0xf30f7f10, "vmovdqu", Wx, xx, Vx, xx, xx, mrm|vex, x, END_LIST},
{OP_vmovdqa, 0x660f7f10, "vmovdqa", Wx, xx, Vx, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf20f7f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f7f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EVEX_W_EXT, 0xf30f7f10, "(evex_W ext 13)", xx, xx, xx, xx, xx, mrm|evex, x, 13},
{EVEX_W_EXT, 0x660f7f10, "(evex_W ext 9)", xx, xx, xx, xx, xx, mrm|evex, x, 9},
{EVEX_W_EXT, 0xf20f7f10, "(evex_W ext 12)", xx, xx, xx, xx, xx, mrm|evex, x, 12},
},
/* prefix extension 114 */
{
{INVALID, 0x0f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_haddpd, 0x660f7c10, "haddpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_haddps, 0xf20f7c10, "haddps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0x0f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vhaddpd, 0x660f7c10, "vhaddpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vhaddps, 0xf20f7c10, "vhaddps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 115 */
{
{INVALID, 0x0f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_hsubpd, 0x660f7d10, "hsubpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_hsubps, 0xf20f7d10, "hsubps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0x0f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vhsubpd, 0x660f7d10, "vhsubpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vhsubps, 0xf20f7d10, "vhsubps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 116 */
{
{INVALID, 0x0fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_addsubpd, 0x660fd010, "addsubpd", Vpd, xx, Wpd, Vpd, xx, mrm, x, END_LIST},
{OP_addsubps, 0xf20fd010, "addsubps", Vps, xx, Wps, Vps, xx, mrm, x, END_LIST},
{INVALID, 0x0fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vaddsubpd, 0x660fd010, "vaddsubpd", Vvd, xx, Hvd, Wvd, xx, mrm|vex, x, END_LIST},
{OP_vaddsubps, 0xf20fd010, "vaddsubps", Vvs, xx, Hvs, Wvs, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fd010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/* prefix extension 117 */
{
{INVALID, 0x0ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_lddqu, 0xf20ff010, "lddqu", Vdq, xx, Mdq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vlddqu, 0xf20ff010, "vlddqu", Vx, xx, Mx, xx, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20ff010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
/***************************************************
* SSSE3
*/
{ /* prefix extension 118 */
{OP_pshufb, 0x380018, "pshufb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[118][2]},
{INVALID, 0xf3380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pshufb, 0x66380018, "pshufb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380018, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpshufb, 0x66380018, "vpshufb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 119 */
{OP_phaddw, 0x380118, "phaddw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[119][2]},
{INVALID, 0xf3380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phaddw, 0x66380118, "phaddw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380118, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphaddw, 0x66380118, "vphaddw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 120 */
{OP_phaddd, 0x380218, "phaddd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[120][2]},
{INVALID, 0xf3380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phaddd, 0x66380218, "phaddd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380218, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphaddd, 0x66380218, "vphaddd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 121 */
{OP_phaddsw, 0x380318, "phaddsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[121][2]},
{INVALID, 0xf3380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phaddsw, 0x66380318, "phaddsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380318, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphaddsw, 0x66380318, "vphaddsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 122 */
{OP_pmaddubsw, 0x380418, "pmaddubsw",Pq, xx, Qq, Pq, xx, mrm, x, tpe[122][2]},
{INVALID, 0xf3380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pmaddubsw, 0x66380418, "pmaddubsw",Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380418, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmaddubsw, 0x66380418, "vpmaddubsw",Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 123 */
{OP_phsubw, 0x380518, "phsubw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[123][2]},
{INVALID, 0xf3380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phsubw, 0x66380518, "phsubw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380518, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphsubw, 0x66380518, "vphsubw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 124 */
{OP_phsubd, 0x380618, "phsubd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[124][2]},
{INVALID, 0xf3380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phsubd, 0x66380618, "phsubd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380618, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphsubd, 0x66380618, "vphsubd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 125 */
{OP_phsubsw, 0x380718, "phsubsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[125][2]},
{INVALID, 0xf3380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_phsubsw, 0x66380718, "phsubsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380718, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vphsubsw, 0x66380718, "vphsubsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 126 */
{OP_psignb, 0x380818, "psignb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[126][2]},
{INVALID, 0xf3380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psignb, 0x66380818, "psignb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380818, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsignb, 0x66380818, "vpsignb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 127 */
{OP_psignw, 0x380918, "psignw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[127][2]},
{INVALID, 0xf3380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psignw, 0x66380918, "psignw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380918, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsignw, 0x66380918, "vpsignw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 128 */
{OP_psignd, 0x380a18, "psignd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[128][2]},
{INVALID, 0xf3380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_psignd, 0x66380a18, "psignd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380a18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpsignd, 0x66380a18, "vpsignd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 129 */
{OP_pmulhrsw, 0x380b18, "pmulhrsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[129][2]},
{INVALID, 0xf3380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pmulhrsw, 0x66380b18, "pmulhrsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x380b18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmulhrsw, 0x66380b18, "vpmulhrsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2380b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 130 */
{OP_pabsb, 0x381c18, "pabsb", Pq, xx, Qq, Pq, xx, mrm, x, tpe[130][2]},
{INVALID, 0xf3381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pabsb, 0x66381c18, "pabsb", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381c18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpabsb, 0x66381c18, "vpabsb", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 131 */
{OP_pabsw, 0x381d18, "pabsw", Pq, xx, Qq, Pq, xx, mrm, x, tpe[131][2]},
{INVALID, 0xf3381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pabsw, 0x66381d18, "pabsw", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381d18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpabsw, 0x66381d18, "vpabsw", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 132 */
{OP_pabsd, 0x381e18, "pabsd", Pq, xx, Qq, Pq, xx, mrm, x, tpe[132][2]},
{INVALID, 0xf3381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pabsd, 0x66381e18, "pabsd", Vdq, xx, Wdq, Vdq, xx, mrm, x, END_LIST},
{INVALID, 0xf2381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x381e18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf3381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpabsd, 0x66381e18, "vpabsd", Vx, xx, Hx, Wx, xx, mrm|vex, x, END_LIST},
{INVALID, 0xf2381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x66381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2381e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 133: all assumed to have Ib */
{OP_palignr, 0x3a0f18, "palignr", Pq, xx, Qq, Ib, Pq, mrm, x, tpe[133][2]},
{INVALID, 0xf33a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_palignr, 0x663a0f18, "palignr", Vdq, xx, Wdq, Ib, Vdq, mrm, x, END_LIST},
{INVALID, 0xf23a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x3a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, END_LIST},
{INVALID, 0xf33a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpalignr, 0x663a0f18, "vpalignr", Vx, xx, Hx, Wx, Ib, mrm|vex, x, END_LIST},
{INVALID, 0xf23a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x3a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf33a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x663a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf23a0f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 134 */
{OP_vmread, 0x0f7810, "vmread", Ed_q, xx, Gd_q, xx, xx, mrm|o64, x, END_LIST},
{INVALID, 0xf30f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
    /* FIXME PR 338279: this is listed as /0, but I'm not going to chain it into
     * the reg extensions table until I can verify, since gdb thinks it
     * does NOT need /0.  Waiting for a processor that actually supports it.
     * It's ok for DR proper to think a non-cti instr is valid when really it's not,
     * though for our use as a decoding library we should get it right.
     */
{OP_extrq, 0x660f7810, "extrq", Udq, xx, Ib, Ib, xx, mrm, x, tpe[135][2]},
/* FIXME: is src or dst Udq? */
{OP_insertq, 0xf20f7810, "insertq", Vdq, xx, Udq, Ib, Ib, mrm, x, tpe[135][3]},
{INVALID, 0x0f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 135 */
{OP_vmwrite, 0x0f7910, "vmwrite", Gd_q, xx, Ed_q, xx, xx, mrm|o64, x, END_LIST},
{INVALID, 0xf30f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* FIXME: is src or dst Udq? */
{OP_extrq, 0x660f7910, "extrq", Vdq, xx, Udq, xx, xx, mrm, x, END_LIST},
{OP_insertq, 0xf20f7910, "insertq", Vdq, xx, Udq, xx, xx, mrm, x, END_LIST},
{INVALID, 0x0f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f7910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 136 */
{OP_bsr, 0x0fbd10, "bsr", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, END_LIST},
/* XXX: if cpuid doesn't show lzcnt support, this is treated as bsr */
{OP_lzcnt, 0xf30fbd10, "lzcnt", Gv, xx, Ev, xx, xx, mrm, fW6, END_LIST},
    /* This is bsr w/ DATA_PREFIX, which we indicate by omitting 0x66 (i#1118).
     * It's not in the encoding chain.  Ditto for 0xf2.  If we keep the "all
     * prefix ext entries marked invalid are really treated as valid" behavior,
     * we don't need these, but it's better to be explicit where we have to so
     * we can easily remove that behavior.
     */
{OP_bsr, 0x0fbd10, "bsr", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, NA},
{OP_bsr, 0x0fbd10, "bsr", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, NA},
{INVALID, 0x0fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fbd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 137 */
{OP_vmptrld, 0x0fc736, "vmptrld", xx, xx, Mq, xx, xx, mrm|o64, x, END_LIST},
{OP_vmxon, 0xf30fc736, "vmxon", xx, xx, Mq, xx, xx, mrm|o64, x, END_LIST},
{OP_vmclear, 0x660fc736, "vmclear", Mq, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{INVALID, 0xf20fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fc736, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 138 */
{OP_movbe, 0x38f018, "movbe", Gv, xx, Mv, xx, xx, mrm, x, tpe[139][0]},
{INVALID, 0xf338f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
    /* really this is the regular data-size prefix */
{OP_movbe, 0x6638f018, "movbe", Gw, xx, Mw, xx, xx, mrm, x, tpe[139][2]},
{OP_crc32, 0xf238f018, "crc32", Gv, xx, Eb, Gv, xx, mrm, x, END_LIST},
{INVALID, 0x38f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x38f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 139 */
{OP_movbe, 0x38f118, "movbe", Mv, xx, Gv, xx, xx, mrm, x, tpe[138][2]},
{INVALID, 0xf338f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
    /* really this is the regular data-size prefix */
{OP_movbe, 0x6638f118, "movbe", Mw, xx, Gw, xx, xx, mrm, x, END_LIST},
{OP_crc32, 0xf238f118, "crc32", Gv, xx, Ev, Gv, xx, mrm, x, tpe[138][3]},
{INVALID, 0x38f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x38f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
    /* XXX: the Intel Vol2B Sep2010 decode table claims crc32 has Gd
     * instead of Gv, that f2 f1 has Ey instead of Ev, and that
     * there is a separate instruction with both 66 and f2 prefixes!
     * But the detail page doesn't corroborate that...
*/
},
{ /* prefix extension 140 */
{OP_bsf, 0x0fbc10, "bsf", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, END_LIST},
/* XXX: if cpuid doesn't show tzcnt support, this is treated as bsf */
{OP_tzcnt, 0xf30fbc10, "tzcnt", Gv, xx, Ev, xx, xx, mrm, fW6, END_LIST},
/* see OP_bsr comments above -- this is the same but for bsf: */
{OP_bsf, 0x0fbc10, "bsf", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, NA},
{OP_bsf, 0x0fbc10, "bsf", Gv, xx, Ev, xx, xx, mrm|predcx, fW6, NA},
{INVALID, 0x0fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20fbc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 141 */
{INVALID, 0x38f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_bextr, 0x38f718, "bextr", Gy, xx, Ey, By, xx, mrm|vex, fW6, txop[60]},
{OP_sarx, 0xf338f718, "sarx", Gy, xx, Ey, By, xx, mrm|vex, x, END_LIST},
{OP_shlx, 0x6638f718, "shlx", Gy, xx, Ey, By, xx, mrm|vex, x, END_LIST},
{OP_shrx, 0xf238f718, "shrx", Gy, xx, Ey, By, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x38f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 142 */
{INVALID, 0x38f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_bzhi, 0x38f518, "bzhi", Gy, xx, Ey, By, xx, mrm|vex, fW6, END_LIST},
{INVALID, 0xf338f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pext, 0x6638f518, "pext", Gy, xx, Ey, By, xx, mrm|vex, x, END_LIST},
{OP_pdep, 0xf238f518, "pdep", Gy, xx, Ey, By, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x38f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 143 */
{INVALID, 0x38f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_adox, 0xf338f618, "adox", Gy, xx, Ey, Gy, xx, mrm, (fWO|fRO), END_LIST},
{OP_adcx, 0x6638f618, "adcx", Gy, xx, Ey, Gy, xx, mrm, (fWC|fRC), END_LIST},
{INVALID, 0xf238f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x38f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_mulx, 0xf238f618, "mulx", By, Gy, Ey, uDX, xx, mrm|vex, x, END_LIST},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x38f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf338f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x6638f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf238f618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 144 */
{INVALID, 0x0f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9010, "(vex_W ext 74)", xx, xx, xx, xx, xx, mrm|vex, x, 74},
{INVALID, 0xf30f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9010, "(vex_W ext 75)", xx, xx, xx, xx, xx, mrm|vex, x, 75},
{INVALID, 0xf20f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 145 */
{INVALID, 0x0f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9110, "(vex_W ext 76)", xx, xx, xx, xx, xx, mrm|vex, x, 76},
{INVALID, 0xf30f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9110, "(vex_W ext 77)", xx, xx, xx, xx, xx, mrm|vex, x, 77},
{INVALID, 0xf20f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 146 */
{INVALID, 0x0f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9210, "(vex_W ext 78)", xx, xx, xx, xx, xx, mrm|vex, x, 78},
{INVALID, 0xf30f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9210, "(vex_W ext 79)", xx, xx, xx, xx, xx, mrm|vex, x, 79},
{VEX_W_EXT, 0xf20f9210, "(vex_W ext 106)",xx, xx, xx, xx, xx, mrm|vex, x, 106},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 147 */
{INVALID, 0x0f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9310, "(vex_W ext 80)", xx, xx, xx, xx, xx, mrm|vex, x, 80},
{INVALID, 0xf30f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9310, "(vex_W ext 81)", xx, xx, xx, xx, xx, mrm|vex, x, 81},
{VEX_W_EXT, 0xf20f9310, "(vex_W ext 107)",xx, xx, xx, xx, xx, mrm|vex, x, 107},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 148 */
{INVALID, 0x0f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4110, "(vex_W ext 82)", xx, xx, xx, xx, xx, mrm|vex, x, 82},
{INVALID, 0xf30f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4110, "(vex_W ext 83)", xx, xx, xx, xx, xx, mrm|vex, x, 83},
{INVALID, 0xf20f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 149 */
{INVALID, 0x0f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4210, "(vex_W ext 84)", xx, xx, xx, xx, xx, mrm|vex, x, 84},
{INVALID, 0xf30f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4210, "(vex_W ext 85)", xx, xx, xx, xx, xx, mrm|vex, x, 85},
{INVALID, 0xf20f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 150 */
{INVALID, 0x0f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4b10, "(vex_W ext 86)", xx, xx, xx, xx, xx, mrm|vex, x, 86},
{INVALID, 0xf30f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4b10, "(vex_W ext 87)", xx, xx, xx, xx, xx, mrm|vex, x, 87},
{INVALID, 0xf20f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 151 */
{INVALID, 0x0f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4410, "(vex_W ext 88)", xx, xx, xx, xx, xx, mrm|vex, x, 88},
{INVALID, 0xf30f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4410, "(vex_W ext 89)", xx, xx, xx, xx, xx, mrm|vex, x, 89},
{INVALID, 0xf20f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 152 */
{INVALID, 0x0f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4510, "(vex_W ext 90)", xx, xx, xx, xx, xx, mrm|vex, x, 90},
{INVALID, 0xf30f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4510, "(vex_W ext 91)", xx, xx, xx, xx, xx, mrm|vex, x, 91},
{INVALID, 0xf20f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 153 */
{INVALID, 0x0f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4610, "(vex_W ext 92)", xx, xx, xx, xx, xx, mrm|vex, x, 92},
{INVALID, 0xf30f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4610, "(vex_W ext 93)", xx, xx, xx, xx, xx, mrm|vex, x, 93},
{INVALID, 0xf20f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 154 */
{INVALID, 0x0f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4710, "(vex_W ext 94)", xx, xx, xx, xx, xx, mrm|vex, x, 94},
{INVALID, 0xf30f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4710, "(vex_W ext 95)", xx, xx, xx, xx, xx, mrm|vex, x, 95},
{INVALID, 0xf20f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 155 */
{INVALID, 0x0f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f4a10, "(vex_W ext 96)", xx, xx, xx, xx, xx, mrm|vex, x, 96},
{INVALID, 0xf30f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f4a10, "(vex_W ext 97)", xx, xx, xx, xx, xx, mrm|vex, x, 97},
{INVALID, 0xf20f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 156 */
{INVALID, 0x0f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9810, "(vex_W ext 98)", xx, xx, xx, xx, xx, mrm|vex, x, 98},
{INVALID, 0xf30f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9810, "(vex_W ext 99)", xx, xx, xx, xx, xx, mrm|vex, x, 99},
{INVALID, 0xf20f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* prefix extension 157 */
{INVALID, 0x0f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x0f9910, "(vex_W ext 104)", xx, xx, xx, xx, xx, mrm|vex, x, 104},
{INVALID, 0xf30f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{VEX_W_EXT, 0x660f9910, "(vex_W ext 105)", xx, xx, xx, xx, xx, mrm|vex, x, 105},
{INVALID, 0xf20f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* TODO i#1312: Support AVX-512. */
{INVALID, 0x0f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf30f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x660f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf20f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
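
/* Illustrative sketch (not part of the tables above): each 12-slot group in the
 * prefix-extension table appears to hold the four legacy forms (no prefix, F3,
 * 66, F2) in slots 0-3, their VEX-encoded forms in slots 4-7, and their
 * EVEX-encoded forms in slots 8-11 (the EVEX slots are mostly still TODO
 * i#1312).  The enum and helper names below are hypothetical and exist only to
 * illustrate that layout; they are not used elsewhere.
 */
#if 0 /* illustration only */
typedef enum {
    SKETCH_PFX_NONE = 0, /* no SIMD prefix */
    SKETCH_PFX_F3 = 1,
    SKETCH_PFX_66 = 2,
    SKETCH_PFX_F2 = 3,
} sketch_pfx_t;
typedef enum {
    SKETCH_ENC_LEGACY = 0,
    SKETCH_ENC_VEX = 1,
    SKETCH_ENC_EVEX = 2,
} sketch_enc_t;

/* Maps (SIMD prefix, encoding class) to a slot index 0..11. */
static inline int
sketch_prefix_ext_slot(sketch_pfx_t pfx, sketch_enc_t enc)
{
    return (int)enc * 4 + (int)pfx;
}
#endif
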
/****************************************************************************
 * Instructions that differ based on whether they are vex-encoded or not.
 * Most of these require a 0x66 prefix, but we use reqp for that,
 * so there's nothing inherent here about prefixes.
 * TODO i#1312: A third row has been added for AVX-512 w/ the EVEX prefix.  Most or
 * all EVEX instructions seem to resemble their corresponding VEX versions.  If we
 * add decode_table entries here, we currently can't test them through instr_create
 * macros unless we force the creation of EVEX versions.  (See the illustrative
 * sketch below.)
*/
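/* Illustrative sketch: each entry of the e_vex_extensions table below holds
 * three alternatives -- slot 0 the legacy (non-VEX) form, slot 1 the VEX form,
 * and slot 2 the EVEX form -- so selecting the right variant is a simple index
 * by encoding class.  The function and enum names are hypothetical, for
 * illustration only; instr_info_t is the element type used by the table itself.
 */
#if 0 /* illustration only */
typedef enum {
    SKETCH_ROW_LEGACY = 0,
    SKETCH_ROW_VEX = 1,
    SKETCH_ROW_EVEX = 2,
} sketch_row_t;

/* Returns the variant of extension index "ext" matching the encoding class. */
static inline const instr_info_t *
sketch_e_vex_row(const instr_info_t (*table)[3], int ext, sketch_row_t row)
{
    return &table[ext][(int)row];
}
#endif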
const instr_info_t e_vex_extensions[][3] = {
{ /* e_vex ext 0 */
{INVALID, 0x663a4a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vblendvps, 0x663a4a18, "vblendvps", Vx, xx, Hx,Wx,Lx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 1 */
{INVALID, 0x663a4b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vblendvpd, 0x663a4b18, "vblendvpd", Vx, xx, Hx,Wx,Lx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 2 */
{INVALID, 0x663a4c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpblendvb, 0x663a4c18, "vpblendvb", Vx, xx, Hx,Wx,Lx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 3 */
{OP_ptest, 0x66381718, "ptest", xx, xx, Vdq,Wdq, xx, mrm|reqp, fW6, END_LIST},
{OP_vptest, 0x66381718, "vptest", xx, xx, Vx,Wx, xx, mrm|vex|reqp, fW6, END_LIST},
{INVALID, 0x66381718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 4 */
{OP_pmovsxbw, 0x66382018, "pmovsxbw", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxbw, 0x66382018, "vpmovsxbw", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66382018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 5 */
{OP_pmovsxbd, 0x66382118, "pmovsxbd", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxbd, 0x66382118, "vpmovsxbd", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66382118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 6 */
{OP_pmovsxbq, 0x66382218, "pmovsxbq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxbq, 0x66382218, "vpmovsxbq", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66382218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 7 */
{OP_pmovsxwd, 0x66382318, "pmovsxwd", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxwd, 0x66382318, "vpmovsxwd", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66382318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 8 */
{OP_pmovsxwq, 0x66382418, "pmovsxwq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxwq, 0x66382418, "vpmovsxwq", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66382418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 9 */
{OP_pmovsxdq, 0x66382518, "pmovsxdq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovsxdq,0x66382518, "vpmovsxdq", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66382518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 10 */
{OP_pmuldq, 0x66382818, "pmuldq", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmuldq, 0x66382818, "vpmuldq", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66382818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 11 */
{OP_pcmpeqq, 0x66382918, "pcmpeqq", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpcmpeqq, 0x66382918, "vpcmpeqq", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66382918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 12 */
{OP_movntdqa, 0x66382a18, "movntdqa", Mdq, xx, Vdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vmovntdqa, 0x66382a18, "vmovntdqa", Mx, xx, Vx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66382a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 13 */
{OP_packusdw, 0x66382b18, "packusdw", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpackusdw, 0x66382b18, "vpackusdw", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66382b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 14 */
{OP_pmovzxbw, 0x66383018, "pmovzxbw", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxbw, 0x66383018, "vpmovzxbw", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 15 */
{OP_pmovzxbd, 0x66383118, "pmovzxbd", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxbd, 0x66383118, "vpmovzxbd", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 16 */
{OP_pmovzxbq, 0x66383218, "pmovzxbq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxbq, 0x66383218, "vpmovzxbq", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 17 */
{OP_pmovzxwd, 0x66383318, "pmovzxwd", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxwd, 0x66383318, "vpmovzxwd", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 18 */
{OP_pmovzxwq, 0x66383418, "pmovzxwq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxwq, 0x66383418, "vpmovzxwq", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 19 */
{OP_pmovzxdq, 0x66383518, "pmovzxdq", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vpmovzxdq, 0x66383518, "vpmovzxdq", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 20 */
{OP_pcmpgtq, 0x66383718, "pcmpgtq", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpcmpgtq, 0x66383718, "vpcmpgtq", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 21 */
{OP_pminsb, 0x66383818, "pminsb", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpminsb, 0x66383818, "vpminsb", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 22 */
{OP_pminsd, 0x66383918, "pminsd", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpminsd, 0x66383918, "vpminsd", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 23 */
{OP_pminuw, 0x66383a18, "pminuw", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpminuw, 0x66383a18, "vpminuw", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 24 */
{OP_pminud, 0x66383b18, "pminud", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpminud, 0x66383b18, "vpminud", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 25 */
{OP_pmaxsb, 0x66383c18, "pmaxsb", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmaxsb, 0x66383c18, "vpmaxsb", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 26 */
{OP_pmaxsd, 0x66383d18, "pmaxsd", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmaxsd, 0x66383d18, "vpmaxsd", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 27 */
{OP_pmaxuw, 0x66383e18, "pmaxuw", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmaxuw, 0x66383e18, "vpmaxuw", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 28 */
{OP_pmaxud, 0x66383f18, "pmaxud", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmaxud, 0x66383f18, "vpmaxud", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66383f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 29 */
{OP_pmulld, 0x66384018, "pmulld", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vpmulld, 0x66384018, "vpmulld", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66384018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 30 */
{OP_phminposuw, 0x66384118,"phminposuw",Vdq,xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vphminposuw, 0x66384118,"vphminposuw",Vdq,xx, Wdq, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66384118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 31 */
{OP_aesimc, 0x6638db18, "aesimc", Vdq, xx, Wdq, xx, xx, mrm|reqp, x, END_LIST},
{OP_vaesimc, 0x6638db18, "vaesimc", Vdq, xx, Wdq, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x6638db18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 32 */
{OP_aesenc, 0x6638dc18, "aesenc", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vaesenc, 0x6638dc18, "vaesenc", Vdq, xx, Hdq,Wdq, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x6638dc18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 33 */
{OP_aesenclast, 0x6638dd18,"aesenclast",Vdq,xx,Wdq,Vdq,xx, mrm|reqp, x, END_LIST},
{OP_vaesenclast, 0x6638dd18,"vaesenclast",Vdq,xx,Hdq,Wdq,xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x6638dd18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 34 */
{OP_aesdec, 0x6638de18, "aesdec", Vdq, xx, Wdq,Vdq, xx, mrm|reqp, x, END_LIST},
{OP_vaesdec, 0x6638de18, "vaesdec", Vdq, xx, Hdq,Wdq, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x6638de18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 35 */
{OP_aesdeclast, 0x6638df18,"aesdeclast",Vdq,xx,Wdq,Vdq,xx, mrm|reqp, x, END_LIST},
{OP_vaesdeclast, 0x6638df18,"vaesdeclast",Vdq,xx,Hdq,Wdq,xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x6638df18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 36 */
{OP_pextrb, 0x663a1418, "pextrb", Rd_Mb, xx, Vb_dq, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vpextrb, 0x663a1418, "vpextrb", Rd_Mb, xx, Vb_dq, Ib, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a1418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 37 */
{OP_pextrw, 0x663a1518, "pextrw", Rd_Mw, xx, Vw_dq, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vpextrw, 0x663a1518, "vpextrw", Rd_Mw, xx, Vw_dq, Ib, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a1518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 38 */
{OP_pextrd, 0x663a1618, "pextrd", Ed_q, xx, Vd_q_dq, Ib, xx, mrm|reqp, x, END_LIST},/*"pextrq" with rex.w*/
{OP_vpextrd, 0x663a1618, "vpextrd", Ed_q, xx, Vd_q_dq, Ib, xx, mrm|vex|reqp, x, END_LIST},/*"vpextrq" with rex.w*/
{INVALID, 0x663a1618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 39 */
{OP_extractps, 0x663a1718, "extractps", Ed, xx, Vd_dq, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vextractps, 0x663a1718, "vextractps", Ed, xx, Vd_dq, Ib, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a1718, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 40 */
{OP_roundps, 0x663a0818, "roundps", Vdq, xx, Wdq, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vroundps, 0x663a0818, "vroundps", Vx, xx, Wx, Ib, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 41 */
{OP_roundpd, 0x663a0918, "roundpd", Vdq, xx, Wdq, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vroundpd, 0x663a0918, "vroundpd", Vx, xx, Wx, Ib, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 42 */
{OP_roundss, 0x663a0a18, "roundss", Vss, xx, Wss, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vroundss, 0x663a0a18, "vroundss", Vdq, xx, H12_dq, Wss, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 43 */
{OP_roundsd, 0x663a0b18, "roundsd", Vsd, xx, Wsd, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vroundsd, 0x663a0b18, "vroundsd", Vdq, xx, Hsd, Wsd, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0b18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 44 */
{OP_blendps, 0x663a0c18, "blendps", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vblendps, 0x663a0c18, "vblendps", Vx, xx, Hx, Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 45 */
{OP_blendpd, 0x663a0d18, "blendpd", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vblendpd, 0x663a0d18, "vblendpd", Vx, xx, Hx, Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 46 */
{OP_pblendw, 0x663a0e18, "pblendw", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vpblendw, 0x663a0e18, "vpblendw", Vx, xx, Hx, Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 47 */
/* FIXME i#1388: pinsrb actually reads only bottom byte of reg */
{OP_pinsrb, 0x663a2018, "pinsrb", Vb_dq, xx, Rd_Mb, Ib, xx, mrm|reqp, x, END_LIST},
{OP_vpinsrb, 0x663a2018, "vpinsrb", Vdq, xx, H15_dq, Rd_Mb, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a2018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 48 */
{OP_insertps, 0x663a2118, "insertps", Vdq,xx,Udq_Md,Ib, xx, mrm|reqp, x, END_LIST},
{OP_vinsertps,0x663a2118, "vinsertps", Vdq,xx,Hdq,Udq_Md,Ib, mrm|vex|reqp|reqL0, x, END_LIST},
{INVALID, 0x663a2118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 49 */
{OP_pinsrd, 0x663a2218, "pinsrd", Vd_q_dq, xx, Ed_q,Ib, xx, mrm|reqp, x, END_LIST},/*"pinsrq" with rex.w*/
{OP_vpinsrd, 0x663a2218, "vpinsrd", Vdq, xx, H12_8_dq, Ed_q, Ib, mrm|vex|reqp, x, END_LIST},/*"vpinsrq" with rex.w*/
{INVALID, 0x663a2218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 50 */
{OP_dpps, 0x663a4018, "dpps", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vdpps, 0x663a4018, "vdpps", Vx, xx, Hx, Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 51 */
{OP_dppd, 0x663a4118, "dppd", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vdppd, 0x663a4118, "vdppd", Vdq, xx, Hdq, Wdq, Ib, mrm|vex|reqp|reqL0, x, END_LIST},
{INVALID, 0x663a4118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 52 */
{OP_mpsadbw, 0x663a4218, "mpsadbw", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vmpsadbw, 0x663a4218, "vmpsadbw", Vx, xx, Hx, Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 53 */
{OP_pcmpestrm, 0x663a6018, "pcmpestrm",xmm0, xx, Vdq, Wdq, Ib, mrm|reqp|xop, fW6, exop[8]},
{OP_vpcmpestrm,0x663a6018, "vpcmpestrm",xmm0, xx, Vdq, Wdq, Ib, mrm|vex|reqp|xop, fW6, exop[11]},
{INVALID, 0x663a6018, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 54 */
{OP_pcmpestri, 0x663a6118, "pcmpestri",ecx, xx, Vdq, Wdq, Ib, mrm|reqp|xop, fW6, exop[9]},
{OP_vpcmpestri,0x663a6118, "vpcmpestri",ecx, xx, Vdq, Wdq, Ib, mrm|vex|reqp|xop, fW6, exop[12]},
{INVALID, 0x663a6118, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 55 */
{OP_pcmpistrm, 0x663a6218, "pcmpistrm",xmm0, xx, Vdq, Wdq, Ib, mrm|reqp, fW6, END_LIST},
{OP_vpcmpistrm,0x663a6218, "vpcmpistrm",xmm0, xx, Vdq, Wdq, Ib, mrm|vex|reqp, fW6, END_LIST},
{INVALID, 0x663a6218, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 56 */
{OP_pcmpistri, 0x663a6318, "pcmpistri",ecx, xx, Vdq, Wdq, Ib, mrm|reqp, fW6, END_LIST},
{OP_vpcmpistri,0x663a6318, "vpcmpistri",ecx, xx, Vdq, Wdq, Ib, mrm|vex|reqp, fW6, END_LIST},
{INVALID, 0x663a6318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 57 */
{OP_pclmulqdq, 0x663a4418, "pclmulqdq", Vdq, xx, Wdq, Ib, Vdq, mrm|reqp, x, END_LIST},
{OP_vpclmulqdq,0x663a4418, "vpclmulqdq", Vdq, xx, Hdq, Wdq, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a4418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 58 */
{OP_aeskeygenassist, 0x663adf18, "aeskeygenassist",Vdq,xx,Wdq,Ib,xx,mrm|reqp,x,END_LIST},
{OP_vaeskeygenassist,0x663adf18, "vaeskeygenassist",Vdq,xx,Wdq,Ib,xx,mrm|vex|reqp,x,END_LIST},
{INVALID, 0x663adf18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 59 */
{INVALID, 0x66380e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vtestps, 0x66380e18, "vtestps", xx, xx, Vx,Wx, xx, mrm|vex|reqp, fW6, END_LIST},
{INVALID, 0x66380e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 60 */
{INVALID, 0x66380f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vtestpd, 0x66380f18, "vtestpd", xx, xx, Vx,Wx, xx, mrm|vex|reqp, fW6, END_LIST},
{INVALID, 0x66380f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 61 */
{OP_ldmxcsr, 0x0fae32, "ldmxcsr", xx, xx, Md, xx, xx, mrm, x, END_LIST},
{OP_vldmxcsr, 0x0fae32, "vldmxcsr", xx, xx, Md, xx, xx, mrm|vex|reqL0, x, END_LIST},
{INVALID, 0x0fae32, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 62 */
{OP_stmxcsr, 0x0fae33, "stmxcsr", Md, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_vstmxcsr, 0x0fae33, "vstmxcsr", Md, xx, xx, xx, xx, mrm|vex, x, END_LIST},
{INVALID, 0x0fae33, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 63 */
{INVALID, 0x66381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtph2ps, 0x66381318, "vcvtph2ps", Vx, xx, Wx, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66381318, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 64 */
{INVALID, 0x66381818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vbroadcastss, 0x66381818, "vbroadcastss", Vx, xx, Wd_dq, xx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66381818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 65 */
{INVALID, 0x66381918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vbroadcastsd, 0x66381918, "vbroadcastsd", Vqq, xx, Wq_dq, xx, xx, mrm|vex|reqp|reqL1, x, END_LIST},
{INVALID, 0x66381918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 66 */
{INVALID, 0x66381a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vbroadcastf128, 0x66381a18, "vbroadcastf128", Vqq, xx, Mdq, xx, xx, mrm|vex|reqp|reqL1, x, END_LIST},
{INVALID, 0x66381a18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 67 */
{INVALID, 0x66382c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmaskmovps, 0x66382c18, "vmaskmovps", Vx, xx, Hx,Mx, xx, mrm|vex|reqp|predcx, x, tvex[69][1]},
{INVALID, 0x66382c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 68 */
{INVALID, 0x66382d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmaskmovpd, 0x66382d18, "vmaskmovpd", Vx, xx, Hx,Mx, xx, mrm|vex|reqp|predcx, x, tvex[70][1]},
{INVALID, 0x66382d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 69 */
{INVALID, 0x66382e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmaskmovps, 0x66382e18, "vmaskmovps", Mx, xx, Hx,Vx, xx, mrm|vex|reqp|predcx, x, END_LIST},
{INVALID, 0x66382e18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 70 */
{INVALID, 0x66382f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmaskmovpd, 0x66382f18, "vmaskmovpd", Mx, xx, Hx,Vx, xx, mrm|vex|reqp|predcx, x, END_LIST},
{INVALID, 0x66382f18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 71 */
{INVALID, 0x663a0418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpermilps, 0x663a0418, "vpermilps", Vx, xx, Wx, Ib, xx, mrm|vex|reqp, x, tvex[77][1]},
{INVALID, 0x663a0418, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 72 */
{INVALID, 0x663a0518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpermilpd, 0x663a0518, "vpermilpd", Vx, xx, Wx, Ib, xx, mrm|vex|reqp, x, tvex[78][1]},
{INVALID, 0x663a0518, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 73 */
{INVALID, 0x663a0618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vperm2f128, 0x663a0618, "vperm2f128", Vx, xx, Hx,Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a0618, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 74 */
{INVALID, 0x663a1818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vinsertf128, 0x663a1818, "vinsertf128", Vx, xx, Hx,Wx, Ib, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a1818, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 75 */
{INVALID, 0x663a1918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vextractf128, 0x663a1918, "vextractf128", Wdq, xx, Vdq_qq, Ib, xx, mrm|vex|reqp|reqL1, x, END_LIST},
{INVALID, 0x663a1918, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 76 */
{INVALID, 0x663a1d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvtps2ph, 0x663a1d18, "vcvtps2ph", Wx, xx, Vx, Ib, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x663a1d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 77 */
{INVALID, 0x66380c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpermilps, 0x66380c18, "vpermilps", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66380c18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 78 */
{INVALID, 0x66380d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpermilpd, 0x66380d18, "vpermilpd", Vx, xx, Hx,Wx, xx, mrm|vex|reqp, x, END_LIST},
{INVALID, 0x66380d18, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 79 */
{OP_seto, 0x0f9010, "seto", Eb, xx, xx, xx, xx, mrm, fRO, END_LIST},
{PREFIX_EXT, 0x0f9010, "(prefix ext 144)", xx, xx, xx, xx, xx, mrm, x, 144},
{INVALID, 0x0f9010, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 80 */
{OP_setno, 0x0f9110, "setno", Eb, xx, xx, xx, xx, mrm, fRO, END_LIST},
{PREFIX_EXT, 0x0f9110, "(prefix ext 145)", xx, xx, xx, xx, xx, mrm, x, 145},
{INVALID, 0x0f9110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 81 */
{OP_setb, 0x0f9210, "setb", Eb, xx, xx, xx, xx, mrm, fRC, END_LIST},
{PREFIX_EXT, 0x0f9210, "(prefix ext 146)", xx, xx, xx, xx, xx, mrm, x, 146},
{INVALID, 0x0f9210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 82 */
{OP_setnb, 0x0f9310, "setnb", Eb, xx, xx, xx, xx, mrm, fRC, END_LIST},
{PREFIX_EXT, 0x0f9310, "(prefix ext 147)", xx, xx, xx, xx, xx, mrm, x, 147},
{INVALID, 0x0f9310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 83 */
{OP_cmovno, 0x0f4110, "cmovno", Gv, xx, Ev, xx, xx, mrm|predcc, fRO, END_LIST},
{PREFIX_EXT, 0x0f4110, "(prefix ext 148)", xx, xx, xx, xx, xx, mrm, x, 148},
{INVALID, 0x0f4110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 84 */
{OP_cmovb, 0x0f4210, "cmovb", Gv, xx, Ev, xx, xx, mrm|predcc, fRC, END_LIST},
{PREFIX_EXT, 0x0f4210, "(prefix ext 149)", xx, xx, xx, xx, xx, mrm, x, 149},
{INVALID, 0x0f4210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 85 */
{OP_cmovnp, 0x0f4b10, "cmovnp", Gv, xx, Ev, xx, xx, mrm|predcc, fRP, END_LIST},
{PREFIX_EXT, 0x0f4b10, "(prefix ext 150)", xx, xx, xx, xx, xx, mrm, x, 150},
{INVALID, 0x0f4b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 86 */
{OP_cmovz, 0x0f4410, "cmovz", Gv, xx, Ev, xx, xx, mrm|predcc, fRZ, END_LIST},
{PREFIX_EXT, 0x0f4410, "(prefix ext 151)", xx, xx, xx, xx, xx, mrm, x, 151},
{INVALID, 0x0f4410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 87 */
{OP_cmovnz, 0x0f4510, "cmovnz", Gv, xx, Ev, xx, xx, mrm|predcc, fRZ, END_LIST},
{PREFIX_EXT, 0x0f4510, "(prefix ext 152)", xx, xx, xx, xx, xx, mrm, x, 152},
{INVALID, 0x0f4510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 88 */
{OP_cmovbe, 0x0f4610, "cmovbe", Gv, xx, Ev, xx, xx, mrm|predcc, (fRC|fRZ), END_LIST},
{PREFIX_EXT, 0x0f4610, "(prefix ext 153)", xx, xx, xx, xx, xx, mrm, x, 153},
{INVALID, 0x0f4610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 89 */
{OP_cmovnbe, 0x0f4710, "cmovnbe", Gv, xx, Ev, xx, xx, mrm|predcc, (fRC|fRZ), END_LIST},
{PREFIX_EXT, 0x0f4710, "(prefix ext 154)", xx, xx, xx, xx, xx, mrm, x, 154},
{INVALID, 0x0f4710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 90 */
{OP_cmovp, 0x0f4a10, "cmovp", Gv, xx, Ev, xx, xx, mrm|predcc, fRP, END_LIST},
{PREFIX_EXT, 0x0f4a10, "(prefix ext 155)", xx, xx, xx, xx, xx, mrm, x, 155},
{INVALID, 0x0f4a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 91 */
{OP_sets, 0x0f9810, "sets", Eb, xx, xx, xx, xx, mrm, fRS, END_LIST},
{PREFIX_EXT, 0x0f9810, "(prefix ext 156)", xx, xx, xx, xx, xx, mrm, x, 156},
{INVALID, 0x0f9810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* e_vex ext 92 */
{OP_setns, 0x0f9910, "setns", Eb, xx, xx, xx, xx, mrm, fRS, END_LIST},
{PREFIX_EXT, 0x0f9910, "(prefix ext 157)", xx, xx, xx, xx, xx, mrm, x, 157},
{INVALID, 0x0f9910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
/****************************************************************************
* Instructions that differ depending on mod and rm bits in modrm byte
 * For mod, entry 0 covers all mem-ref mod values (0,1,2) while entry 1 covers mod value 3.
* For the mem ref, we give just one of the 3 possible modrm bytes
* (we only use it when encoding so we don't need all 3).
*/
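/* Illustrative sketch only (not the actual decoder code): a lookup into this
 * table picks the entry from the mod field (bits 7:6) of the modrm byte, e.g.:
 *   idx  = (((modrm >> 6) & 0x3) == 3) ? 1 : 0;
 *   info = &mod_extensions[code][idx];
 * where "code" is the extension number stored in the referring table entry.
 */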
const instr_info_t mod_extensions[][2] = {
{ /* mod extension 0 */
{OP_sgdt, 0x0f0130, "sgdt", Ms, xx, xx, xx, xx, mrm, x, END_LIST},
{RM_EXT, 0x0f0171, "(group 7 mod + rm ext 0)", xx, xx, xx, xx, xx, mrm, x, 0},
},
{ /* mod extension 1 */
{OP_sidt, 0x0f0131, "sidt", Ms, xx, xx, xx, xx, mrm, x, END_LIST},
{RM_EXT, 0x0f0171, "(group 7 mod + rm ext 1)", xx, xx, xx, xx, xx, mrm, x, 1},
},
{ /* mod extension 2 */
{OP_invlpg, 0x0f0137, "invlpg", xx, xx, Mm, xx, xx, mrm, x, END_LIST},
{RM_EXT, 0x0f0177, "(group 7 mod + rm ext 2)", xx, xx, xx, xx, xx, mrm, x, 2},
},
{ /* mod extension 3 */
{OP_clflush, 0x0fae37, "clflush", xx, xx, Mb, xx, xx, mrm, x, END_LIST},
{OP_sfence, 0xf80fae77, "sfence", xx, xx, xx, xx, xx, mrm, x, END_LIST},
},
{ /* mod extension 4 */
{OP_lidt, 0x0f0133, "lidt", xx, xx, Ms, xx, xx, mrm, x, END_LIST},
{RM_EXT, 0x0f0173, "(group 7 mod + rm ext 3)", xx, xx, xx, xx, xx, mrm, x, 3},
},
{ /* mod extension 5 */
{OP_lgdt, 0x0f0132, "lgdt", xx, xx, Ms, xx, xx, mrm, x, END_LIST},
{RM_EXT, 0x0f0172, "(group 7 mod + rm ext 4)", xx, xx, xx, xx, xx, mrm, x, 4},
},
{ /* mod extension 6 */
{REX_W_EXT, 0x0fae35, "(rex.w ext 3)", xx, xx, xx, xx, xx, mrm, x, 3},
/* note that gdb thinks e9-ef are "lfence (bad)" (PR 239920) */
{OP_lfence, 0xe80fae75, "lfence", xx, xx, xx, xx, xx, mrm, x, END_LIST},
},
{ /* mod extension 7 */
{REX_W_EXT, 0x0fae36, "(rex.w ext 4)", xx, xx, xx, xx, xx, mrm, x, 4},
{OP_mfence, 0xf00fae76, "mfence", xx, xx, xx, xx, xx, mrm, x, END_LIST},
},
{ /* mod extension 8 */
{OP_vmovss, 0xf30f1010, "vmovss", Vss, xx, Wss, xx, xx, mrm|vex, x, modx[10][0]},
{OP_vmovss, 0xf30f1010, "vmovss", Vdq, xx, H12_dq, Uss, xx, mrm|vex, x, modx[10][1]},
},
{ /* mod extension 9 */
{OP_vmovsd, 0xf20f1010, "vmovsd", Vsd, xx, Wsd, xx, xx, mrm|vex, x, modx[11][0]},
{OP_vmovsd, 0xf20f1010, "vmovsd", Vdq, xx, Hsd, Usd, xx, mrm|vex, x, modx[11][1]},
},
{ /* mod extension 10 */
{OP_vmovss, 0xf30f1110, "vmovss", Wss, xx, Vss, xx, xx, mrm|vex, x, modx[ 8][1]},
{OP_vmovss, 0xf30f1110, "vmovss", Udq, xx, H12_dq, Vss, xx, mrm|vex, x, modx[20][0]},
},
{ /* mod extension 11 */
{OP_vmovsd, 0xf20f1110, "vmovsd", Wsd, xx, Vsd, xx, xx, mrm|vex, x, modx[ 9][1]},
{OP_vmovsd, 0xf20f1110, "vmovsd", Udq, xx, Hsd, Vsd, xx, mrm|vex, x, modx[21][0]},
},
{ /* mod extension 12 */
{PREFIX_EXT, 0x0fc736, "(prefix ext 137)", xx, xx, xx, xx, xx, no, x, 137},
{OP_rdrand, 0x0fc736, "rdrand", Rv, xx, xx, xx, xx, mrm, fW6, END_LIST},
},
{ /* mod extension 13 */
/* The latest Intel table implies that a 0x66 prefix makes this an invalid instr,
 * but it is not worth explicitly encoding that until we have more information.
*/
{OP_vmptrst, 0x0fc737, "vmptrst", Mq, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{OP_rdseed, 0x0fc737, "rdseed", Rv, xx, xx, xx, xx, mrm, fW6, END_LIST},
},
{ /* mod extension 14 */
{REX_W_EXT, 0x0fae30, "(rex.w ext 0)", xx, xx, xx, xx, xx, mrm, x, 0},
/* Using reqp to avoid having to create a whole prefix_ext entry for one opcode.
* Ditto below.
*/
{OP_rdfsbase,0xf30fae30, "rdfsbase", Ry, xx, xx, xx, xx, mrm|o64|reqp, x, END_LIST},
},
{ /* mod extension 15 */
{REX_W_EXT, 0x0fae31, "(rex.w ext 1)", xx, xx, xx, xx, xx, mrm, x, 1},
{OP_rdgsbase,0xf30fae31, "rdgsbase", Ry, xx, xx, xx, xx, mrm|o64|reqp, x, END_LIST},
},
{ /* mod extension 16 */
{E_VEX_EXT, 0x0fae32, "(e_vex ext 61)", xx, xx, xx, xx, xx, mrm, x, 61},
{OP_wrfsbase,0xf30fae32, "wrfsbase", xx, xx, Ry, xx, xx, mrm|o64|reqp, x, END_LIST},
},
{ /* mod extension 17 */
{E_VEX_EXT, 0x0fae33, "(e_vex ext 62)", xx, xx, xx, xx, xx, mrm, x, 62},
{OP_wrgsbase,0xf30fae33, "wrgsbase", xx, xx, Ry, xx, xx, mrm|o64|reqp, x, END_LIST},
},
{ /* mod extension 18 */
/* load from memory zeroes top bits */
{OP_movss, 0xf30f1010, "movss", Vdq, xx, Mss, xx, xx, mrm, x, modx[18][1]},
{OP_movss, 0xf30f1010, "movss", Vss, xx, Uss, xx, xx, mrm, x, tpe[1][1]},
},
{ /* mod extension 19 */
/* load from memory zeroes top bits */
{OP_movsd, 0xf20f1010, "movsd", Vdq, xx, Msd, xx, xx, mrm, x, modx[19][1]},
{OP_movsd, 0xf20f1010, "movsd", Vsd, xx, Usd, xx, xx, mrm, x, tpe[1][3]},
},
{ /* mod extension 20 */
{OP_vmovss, 0xf30f1010, "vmovss", Vss, xx, KEb, Wss, xx, mrm|evex, x, modx[22][0]},
{OP_vmovss, 0xf30f1010, "vmovss", Vdq, xx, KEb, H12_dq, Uss, mrm|evex, x, modx[22][1]},
},
{ /* mod extension 21 */
{OP_vmovsd, 0xf20f1010, "vmovsd", Vsd, xx, KEb, Wsd, xx, mrm|evex, x, modx[23][0]},
{OP_vmovsd, 0xf20f1010, "vmovsd", Vdq, xx, KEb, Hsd, Usd, mrm|evex, x, modx[23][1]},
},
{ /* mod extension 22 */
{OP_vmovss, 0xf30f1110, "vmovss", Wss, xx, KEb, Vss, xx, mrm|evex, x, modx[20][1]},
{OP_vmovss, 0xf30f1110, "vmovss", Udq, xx, KEb, H12_dq, Vss, mrm|evex, x, END_LIST},
},
{ /* mod extension 23 */
{OP_vmovsd, 0xf20f1110, "vmovsd", Wsd, xx, KEb, Vsd, xx, mrm|evex, x, modx[21][1]},
{OP_vmovsd, 0xf20f1110, "vmovsd", Udq, xx, KEb, Hsd, Vsd, mrm|evex, x, END_LIST},
},
};
/* Naturally all of these have modrm bytes even if they have no explicit operands */
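/* Each group below has 8 entries, one per value of the rm field (bits 2:0) of
 * the modrm byte.  Illustrative sketch only (not the actual decoder code):
 *   info = &rm_extensions[code][modrm & 0x7];
 */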
const instr_info_t rm_extensions[][8] = {
{ /* rm extension 0 */
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmcall, 0xc10f0171, "vmcall", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{OP_vmlaunch, 0xc20f0171, "vmlaunch", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{OP_vmresume, 0xc30f0171, "vmresume", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{OP_vmxoff, 0xc40f0171, "vmxoff", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* rm extension 1 */
{OP_monitor, 0xc80f0171, "monitor", xx, xx, eax, ecx, edx, mrm, x, END_LIST},
{OP_mwait, 0xc90f0171, "mwait", xx, xx, eax, ecx, xx, mrm, x, END_LIST},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* rm extension 2 */
{OP_swapgs, 0xf80f0177, "swapgs", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
{OP_rdtscp, 0xf90f0177, "rdtscp", edx, eax, xx, xx, xx, mrm|xop, x, exop[10]},/*AMD-only*/
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* rm extension 3 */
{OP_vmrun, 0xd80f0173, "vmrun", xx, xx, axAX, xx, xx, mrm, x, END_LIST},
{OP_vmmcall,0xd90f0173, "vmmcall", xx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_vmload, 0xda0f0173, "vmload", xx, xx, axAX, xx, xx, mrm, x, END_LIST},
{OP_vmsave, 0xdb0f0173, "vmsave", xx, xx, axAX, xx, xx, mrm, x, END_LIST},
{OP_stgi, 0xdc0f0173, "stgi", xx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_clgi, 0xdd0f0173, "clgi", xx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_skinit, 0xde0f0173, "skinit", xx, xx, eax, xx, xx, mrm, x, END_LIST},
{OP_invlpga,0xdf0f0173, "invlpga", xx, xx, axAX, ecx, xx, mrm, x, END_LIST},
},
{ /* rm extension 4 */
{OP_xgetbv, 0xd00f0172, "xgetbv", edx, eax, ecx, xx, xx, mrm, x, END_LIST},
{OP_xsetbv, 0xd10f0172, "xsetbv", xx, xx, ecx, edx, eax, mrm, x, END_LIST},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmfunc, 0xd40f0172, "vmfunc", xx, xx, xx, xx, xx, mrm|o64, x, END_LIST},
/* Only if the transaction fails does xend write to eax => predcx.
* XXX i#1314: on failure eip is also written to.
*/
{OP_xend, 0xd50f0172, "xend", eax, xx, xx, xx, xx, mrm|predcx, x, NA},
{OP_xtest, 0xd60f0172, "xtest", xx, xx, xx, xx, xx, mrm, fW6, NA},
{INVALID, 0x0f0131, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
/****************************************************************************
* Instructions that differ depending on whether in 64-bit mode
*/
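/* Index 0 is the IA-32 (non-64-bit) instruction; index 1 is the 64-bit meaning.
 * E.g., byte 0x40 decodes as "inc eax" outside 64-bit mode but as a rex prefix
 * in 64-bit mode (see x64_ext 0 below).
 */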
const instr_info_t x64_extensions[][2] = {
{ /* x64_ext 0 */
{OP_inc, 0x400000, "inc", zAX, xx, zAX, xx, xx, i64, (fW6&(~fWC)), t64e[1][0]},
{PREFIX, 0x400000, "rex", xx, xx, xx, xx, xx, no, x, PREFIX_REX_GENERAL},
}, { /* x64_ext 1 */
{OP_inc, 0x410000, "inc", zCX, xx, zCX, xx, xx, i64, (fW6&(~fWC)), t64e[2][0]},
{PREFIX, 0x410000, "rex.b", xx, xx, xx, xx, xx, no, x, PREFIX_REX_B},
}, { /* x64_ext 2 */
{OP_inc, 0x420000, "inc", zDX, xx, zDX, xx, xx, i64, (fW6&(~fWC)), t64e[3][0]},
{PREFIX, 0x420000, "rex.x", xx, xx, xx, xx, xx, no, x, PREFIX_REX_X},
}, { /* x64_ext 3 */
{OP_inc, 0x430000, "inc", zBX, xx, zBX, xx, xx, i64, (fW6&(~fWC)), t64e[4][0]},
{PREFIX, 0x430000, "rex.xb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_X|PREFIX_REX_B},
}, { /* x64_ext 4 */
{OP_inc, 0x440000, "inc", zSP, xx, zSP, xx, xx, i64, (fW6&(~fWC)), t64e[5][0]},
{PREFIX, 0x440000, "rex.r", xx, xx, xx, xx, xx, no, x, PREFIX_REX_R},
}, { /* x64_ext 5 */
{OP_inc, 0x450000, "inc", zBP, xx, zBP, xx, xx, i64, (fW6&(~fWC)), t64e[6][0]},
{PREFIX, 0x450000, "rex.rb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_R|PREFIX_REX_B},
}, { /* x64_ext 6 */
{OP_inc, 0x460000, "inc", zSI, xx, zSI, xx, xx, i64, (fW6&(~fWC)), t64e[7][0]},
{PREFIX, 0x460000, "rex.rx", xx, xx, xx, xx, xx, no, x, PREFIX_REX_R|PREFIX_REX_X},
}, { /* x64_ext 7 */
{OP_inc, 0x470000, "inc", zDI, xx, zDI, xx, xx, i64, (fW6&(~fWC)), tex[12][0]},
{PREFIX, 0x470000, "rex.rxb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_R|PREFIX_REX_X|PREFIX_REX_B},
}, { /* x64_ext 8 */
{OP_dec, 0x480000, "dec", zAX, xx, zAX, xx, xx, i64, (fW6&(~fWC)), t64e[9][0]},
{PREFIX, 0x480000, "rex.w", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W},
}, { /* x64_ext 9 */
{OP_dec, 0x490000, "dec", zCX, xx, zCX, xx, xx, i64, (fW6&(~fWC)), t64e[10][0]},
{PREFIX, 0x490000, "rex.wb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_B},
}, { /* x64_ext 10 */
{OP_dec, 0x4a0000, "dec", zDX, xx, zDX, xx, xx, i64, (fW6&(~fWC)), t64e[11][0]},
{PREFIX, 0x4a0000, "rex.wx", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_X},
}, { /* x64_ext 11 */
{OP_dec, 0x4b0000, "dec", zBX, xx, zBX, xx, xx, i64, (fW6&(~fWC)), t64e[12][0]},
{PREFIX, 0x4b0000, "rex.wxb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_X|PREFIX_REX_B},
}, { /* x64_ext 12 */
{OP_dec, 0x4c0000, "dec", zSP, xx, zSP, xx, xx, i64, (fW6&(~fWC)), t64e[13][0]},
{PREFIX, 0x4c0000, "rex.wr", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_R},
}, { /* x64_ext 13 */
{OP_dec, 0x4d0000, "dec", zBP, xx, zBP, xx, xx, i64, (fW6&(~fWC)), t64e[14][0]},
{PREFIX, 0x4d0000, "rex.wrb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_R|PREFIX_REX_B},
}, { /* x64_ext 14 */
{OP_dec, 0x4e0000, "dec", zSI, xx, zSI, xx, xx, i64, (fW6&(~fWC)), t64e[15][0]},
{PREFIX, 0x4e0000, "rex.wrx", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_R|PREFIX_REX_X},
}, { /* x64_ext 15 */
{OP_dec, 0x4f0000, "dec", zDI, xx, zDI, xx, xx, i64, (fW6&(~fWC)), tex[12][1]},
{PREFIX, 0x4f0000, "rex.wrxb", xx, xx, xx, xx, xx, no, x, PREFIX_REX_W|PREFIX_REX_R|PREFIX_REX_X|PREFIX_REX_B},
}, { /* x64_ext 16 */
{OP_arpl, 0x630000, "arpl", Ew, xx, Gw, xx, xx, mrm|i64, fWZ, END_LIST},
{OP_movsxd, 0x630000, "movsxd", Gv, xx, Ed, xx, xx, mrm|o64, x, END_LIST},
},
};
/****************************************************************************
* Instructions that differ depending on the first two bits of the 2nd byte,
* or whether in x64 mode.
*/
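/* Index 0 is the legacy instruction (les/lds, invalid in 64-bit mode); index 1
 * is the vex prefix.  Outside 64-bit mode the two cases are distinguished by the
 * top two bits of the byte following 0xc4/0xc5 (11 => vex); in 64-bit mode
 * 0xc4/0xc5 always start a vex prefix.
 */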
const instr_info_t vex_prefix_extensions[][2] = {
{ /* vex_prefix_ext 0 */
{OP_les, 0xc40000, "les", Gz, es, Mp, xx, xx, mrm|i64, x, END_LIST},
{PREFIX, 0xc40000, "vex+2b", xx, xx, xx, xx, xx, no, x, PREFIX_VEX_3B},
}, { /* vex_prefix_ext 1 */
{OP_lds, 0xc50000, "lds", Gz, ds, Mp, xx, xx, mrm|i64, x, END_LIST},
{PREFIX, 0xc50000, "vex+1b", xx, xx, xx, xx, xx, no, x, PREFIX_VEX_2B},
},
};
/****************************************************************************
* Instructions that differ depending on bits 4 and 5 of the 2nd byte.
*/
const instr_info_t xop_prefix_extensions[][2] = {
{ /* xop_prefix_ext 0 */
{EXTENSION, 0x8f0000, "(group 1d)", xx, xx, xx, xx, xx, mrm, x, 26},
{PREFIX, 0x8f0000, "xop", xx, xx, xx, xx, xx, no, x, PREFIX_XOP},
},
};
/****************************************************************************
* Instructions that differ depending on whether vex-encoded and vex.L
* Index 0 = no vex, 1 = vex and vex.L=0, 2 = vex and vex.L=1
*/
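/* Example: opcode 0f 77 is emms with no vex prefix, vzeroupper when vex-encoded
 * with L=0 (e.g., c5 f8 77), and vzeroall with L=1 (e.g., c5 fc 77).
 */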
const instr_info_t vex_L_extensions[][3] = {
{ /* vex_L_ext 0 */
{OP_emms, 0x0f7710, "emms", xx, xx, xx, xx, xx, no, x, END_LIST},
{OP_vzeroupper, 0x0f7710, "vzeroupper", xx, xx, xx, xx, xx, vex, x, END_LIST},
{OP_vzeroall, 0x0f7790, "vzeroall", xx, xx, xx, xx, xx, vex, x, END_LIST},
},
};
/****************************************************************************
* Instructions that differ depending on whether evex-encoded.
* Index 0 = no evex, 1 = evex
*/
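/* Index 0 is the legacy instruction (bound, invalid in 64-bit mode, per the i64
 * flag); index 1 is the evex prefix.  In 64-bit mode 0x62 always starts an evex
 * prefix; outside 64-bit mode the two are distinguished by the bits of the byte
 * that follows, much as for 0xc4/0xc5 above.
 */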
const instr_info_t evex_prefix_extensions[][2] = {
{ /* evex_prefix_ext */
{OP_bound, 0x620000, "bound", xx, xx, Gv, Ma, xx, mrm|i64, x, END_LIST},
{PREFIX, 0x620000, "(evex prefix)", xx, xx, xx, xx, xx, no, x, PREFIX_EVEX},
},
};
/****************************************************************************
* Instructions that differ depending on whether a rex prefix is present.
*/
/* Instructions that differ depending on whether rex.b is present.
* The table is indexed by rex.b: index 0 is for no rex.b.
*/
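/* E.g., 0x90 by itself is nop, but with rex.b (41 90) it is an xchg involving r8
 * rather than a nop (see rex.b extension 0 below).
 */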
const instr_info_t rex_b_extensions[][2] = {
{ /* rex.b extension 0 */
{OP_nop, 0x900000, "nop", xx, xx, xx, xx, xx, no, x, tpe[103][2]},
/* For decoding we avoid needing new operand types by only getting
* here if rex.b is set. For encode, we would need either to take
* REQUIRES_REX + OPCODE_SUFFIX or a new operand type for registers that
* must be extended (could also try to list r8 instead of eax but
* have to make sure all decode/encode routines can handle that as most
* assume the registers listed here are 32-bit base): that's too
* much effort for a corner case that we're not 100% certain works on
 * all x64 processors, so we just don't list it in the encoding chain.
*/
{OP_xchg, 0x900000, "xchg", eAX_x, eAX, eAX_x, eAX, xx, o64, x, END_LIST},
},
};
/* Instructions that differ depending on whether rex.w is present.
* The table is indexed by rex.w: index 0 is for no rex.w.
*/
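/* E.g., 0f ae /0 is fxsave without rex.w and fxsave64 with rex.w
 * (see rex.w extension 0 below).
 */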
const instr_info_t rex_w_extensions[][2] = {
{ /* rex.w extension 0 */
{OP_fxsave32, 0x0fae30, "fxsave", Me, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_fxsave64, 0x0fae30, "fxsave64", Me, xx, xx, xx, xx, mrm|rex, x, END_LIST},
},
{ /* rex.w extension 1 */
{OP_fxrstor32, 0x0fae31, "fxrstor", xx, xx, Me, xx, xx, mrm, x, END_LIST},
{OP_fxrstor64, 0x0fae31, "fxrstor64", xx, xx, Me, xx, xx, mrm|rex, o64, END_LIST},
},
{ /* rex.w extension 2 */
{OP_xsave32, 0x0fae34, "xsave", Mxsave, xx, edx, eax, xx, mrm, x, END_LIST},
{OP_xsave64, 0x0fae34, "xsave64", Mxsave, xx, edx, eax, xx, mrm|rex, o64, END_LIST},
},
{ /* rex.w extension 3 */
{OP_xrstor32, 0x0fae35, "xrstor", xx, xx, Mxsave, edx, eax, mrm, x, END_LIST},
{OP_xrstor64, 0x0fae35, "xrstor64", xx, xx, Mxsave, edx, eax, mrm|rex, o64, END_LIST},
},
{ /* rex.w extension 4 */
{OP_xsaveopt32, 0x0fae36, "xsaveopt", Mxsave, xx, edx, eax, xx, mrm, x, END_LIST},
{OP_xsaveopt64, 0x0fae36, "xsaveopt64", Mxsave, xx, edx, eax, xx, mrm|rex, o64, END_LIST},
},
{ /* rex.w extension 5 */
{OP_xsavec32, 0x0fc734, "xsavec", Mxsave, xx, edx, eax, xx, mrm, x, END_LIST},
{OP_xsavec64, 0x0fc734, "xsavec64", Mxsave, xx, edx, eax, xx, mrm|rex, o64, END_LIST},
},
};
/****************************************************************************
* 3-byte-opcode instructions: 0x0f 0x38 and 0x0f 0x3a.
* SSSE3 and SSE4.
*
* XXX: if they add more 2nd byte possibilities, we could switch to one
 * large table here and one extension type with indices selecting which subtable to use.
* For now we have two separate tables.
*
* N.B.: if any are added here that do not take modrm bytes, or whose
* size can vary based on data16 or addr16, we need to modify our
* decode_fast table assumptions!
*
* Many of these only come in Vdq,Wdq forms, yet still require the 0x66 prefix.
 * Rather than waste space in the prefix_extensions table for 4 entries, 3 of which
* are invalid, and need another layer of lookup, we use the new REQUIRES_PREFIX
* flag ("reqp").
*
* Since large parts of the opcode space are empty, we save space by having a
* table of 256 indices instead of 256 instr_info_t structs.
*/
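/* Illustrative sketch of the two-level lookup (not the actual decoder code):
 *   info = &third_byte_38[third_byte_38_index[third_opcode_byte]];
 * Unlisted bytes map to index 0, which is the INVALID entry, so they decode as bad.
 */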
const byte third_byte_38_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 96, 97, 56, 57, /* 0 */
16, 0, 0, 88, 17, 18,111, 19, 89, 90, 91, 0, 13, 14, 15, 0, /* 1 */
20, 21, 22, 23, 24, 25, 0, 0, 26, 27, 28, 29, 92, 93, 94, 95, /* 2 */
30, 31, 32, 33, 34, 35,112, 36, 37, 38, 39, 40, 41, 42, 43, 44, /* 3 */
45, 46, 0, 0, 0,113,114,115, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 118,119,108, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 116,117, 0, 0, 0, 0, 0, 0, /* 7 */
49, 50,103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 109, 0,110, 0, /* 8 */
104,105,106,107, 0, 0, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, /* 9 */
0, 0, 0, 0, 0, 0, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, /* A */
0, 0, 0, 0, 0, 0, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 52, 53, 54, 55, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
47, 48,100, 99, 0,101,102, 98, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
const instr_info_t third_byte_38[] = {
{INVALID, 0x38ff18, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* 0*/
/**** SSSE3 ****/
{PREFIX_EXT, 0x380018, "(prefix ext 118)", xx, xx, xx, xx, xx, mrm, x, 118},/* 1*/
{PREFIX_EXT, 0x380118, "(prefix ext 119)", xx, xx, xx, xx, xx, mrm, x, 119},/* 2*/
{PREFIX_EXT, 0x380218, "(prefix ext 120)", xx, xx, xx, xx, xx, mrm, x, 120},/* 3*/
{PREFIX_EXT, 0x380318, "(prefix ext 121)", xx, xx, xx, xx, xx, mrm, x, 121},/* 4*/
{PREFIX_EXT, 0x380418, "(prefix ext 122)", xx, xx, xx, xx, xx, mrm, x, 122},/* 5*/
{PREFIX_EXT, 0x380518, "(prefix ext 123)", xx, xx, xx, xx, xx, mrm, x, 123},/* 6*/
{PREFIX_EXT, 0x380618, "(prefix ext 124)", xx, xx, xx, xx, xx, mrm, x, 124},/* 7*/
{PREFIX_EXT, 0x380718, "(prefix ext 125)", xx, xx, xx, xx, xx, mrm, x, 125},/* 8*/
{PREFIX_EXT, 0x380818, "(prefix ext 126)", xx, xx, xx, xx, xx, mrm, x, 126},/* 9*/
{PREFIX_EXT, 0x380918, "(prefix ext 127)", xx, xx, xx, xx, xx, mrm, x, 127},/*10*/
{PREFIX_EXT, 0x380a18, "(prefix ext 128)", xx, xx, xx, xx, xx, mrm, x, 128},/*11*/
{PREFIX_EXT, 0x380b18, "(prefix ext 129)", xx, xx, xx, xx, xx, mrm, x, 129},/*12*/
{PREFIX_EXT, 0x381c18, "(prefix ext 130)", xx, xx, xx, xx, xx, mrm, x, 130},/*13*/
{PREFIX_EXT, 0x381d18, "(prefix ext 131)", xx, xx, xx, xx, xx, mrm, x, 131},/*14*/
{PREFIX_EXT, 0x381e18, "(prefix ext 132)", xx, xx, xx, xx, xx, mrm, x, 132},/*15*/
/**** SSE4 ****/
{OP_pblendvb, 0x66381018, "pblendvb", Vdq, xx, Wdq,xmm0,Vdq, mrm|reqp,x, END_LIST},/*16*/
{OP_blendvps, 0x66381418, "blendvps", Vdq, xx, Wdq,xmm0,Vdq, mrm|reqp,x, END_LIST},/*17*/
{OP_blendvpd, 0x66381518, "blendvpd", Vdq, xx, Wdq,xmm0,Vdq, mrm|reqp,x, END_LIST},/*18*/
{E_VEX_EXT, 0x66381718, "(e_vex ext 3)", xx, xx, xx, xx, xx, mrm, x, 3},/*19*/
/* 20 */
{E_VEX_EXT, 0x66382018, "(e_vex ext 4)", xx, xx, xx, xx, xx, mrm, x, 4},/*20*/
{E_VEX_EXT, 0x66382118, "(e_vex ext 5)", xx, xx, xx, xx, xx, mrm, x, 5},/*21*/
{E_VEX_EXT, 0x66382218, "(e_vex ext 6)", xx, xx, xx, xx, xx, mrm, x, 6},/*22*/
{E_VEX_EXT, 0x66382318, "(e_vex ext 7)", xx, xx, xx, xx, xx, mrm, x, 7},/*23*/
{E_VEX_EXT, 0x66382418, "(e_vex ext 8)", xx, xx, xx, xx, xx, mrm, x, 8},/*24*/
{E_VEX_EXT, 0x66382518, "(e_vex ext 9)", xx, xx, xx, xx, xx, mrm, x, 9},/*25*/
{E_VEX_EXT, 0x66382818, "(e_vex ext 10)", xx, xx, xx, xx, xx, mrm, x, 10},/*26*/
{E_VEX_EXT, 0x66382918, "(e_vex ext 11)", xx, xx, xx, xx, xx, mrm, x, 11},/*27*/
{E_VEX_EXT, 0x66382a18, "(e_vex ext 12)", xx, xx, xx, xx, xx, mrm, x, 12},/*28*/
{E_VEX_EXT, 0x66382b18, "(e_vex ext 13)", xx, xx, xx, xx, xx, mrm, x, 13},/*29*/
/* 30 */
{E_VEX_EXT, 0x66383018, "(e_vex ext 14)", xx, xx, xx, xx, xx, mrm, x, 14},/*30*/
{E_VEX_EXT, 0x66383118, "(e_vex ext 15)", xx, xx, xx, xx, xx, mrm, x, 15},/*31*/
{E_VEX_EXT, 0x66383218, "(e_vex ext 16)", xx, xx, xx, xx, xx, mrm, x, 16},/*32*/
{E_VEX_EXT, 0x66383318, "(e_vex ext 17)", xx, xx, xx, xx, xx, mrm, x, 17},/*33*/
{E_VEX_EXT, 0x66383418, "(e_vex ext 18)", xx, xx, xx, xx, xx, mrm, x, 18},/*34*/
{E_VEX_EXT, 0x66383518, "(e_vex ext 19)", xx, xx, xx, xx, xx, mrm, x, 19},/*35*/
{E_VEX_EXT, 0x66383718, "(e_vex ext 20)", xx, xx, xx, xx, xx, mrm, x, 20},/*36*/
{E_VEX_EXT, 0x66383818, "(e_vex ext 21)", xx, xx, xx, xx, xx, mrm, x, 21},/*37*/
{E_VEX_EXT, 0x66383918, "(e_vex ext 22)", xx, xx, xx, xx, xx, mrm, x, 22},/*38*/
{E_VEX_EXT, 0x66383a18, "(e_vex ext 23)", xx, xx, xx, xx, xx, mrm, x, 23},/*39*/
{E_VEX_EXT, 0x66383b18, "(e_vex ext 24)", xx, xx, xx, xx, xx, mrm, x, 24},/*40*/
{E_VEX_EXT, 0x66383c18, "(e_vex ext 25)", xx, xx, xx, xx, xx, mrm, x, 25},/*41*/
{E_VEX_EXT, 0x66383d18, "(e_vex ext 26)", xx, xx, xx, xx, xx, mrm, x, 26},/*42*/
{E_VEX_EXT, 0x66383e18, "(e_vex ext 27)", xx, xx, xx, xx, xx, mrm, x, 27},/*43*/
{E_VEX_EXT, 0x66383f18, "(e_vex ext 28)", xx, xx, xx, xx, xx, mrm, x, 28},/*44*/
/* 40 */
{E_VEX_EXT, 0x66384018, "(e_vex ext 29)", xx, xx, xx, xx, xx, mrm, x, 29},/*45*/
{E_VEX_EXT, 0x66384118, "(e_vex ext 30)", xx, xx, xx, xx, xx, mrm, x, 30},/*46*/
/* f0 */
{PREFIX_EXT, 0x38f018, "(prefix ext 138)", xx, xx, xx, xx, xx, mrm, x, 138},/*47*/
{PREFIX_EXT, 0x38f118, "(prefix ext 139)", xx, xx, xx, xx, xx, mrm, x, 139},/*48*/
/* 80 */
{OP_invept, 0x66388018, "invept", xx, xx, Gr, Mdq, xx, mrm|reqp, x, END_LIST},/*49*/
{OP_invvpid, 0x66388118, "invvpid", xx, xx, Gr, Mdq, xx, mrm|reqp, x, END_LIST},/*50*/
/* db-df */
{E_VEX_EXT, 0x6638db18, "(e_vex ext 31)", xx, xx, xx, xx, xx, mrm, x, 31},/*51*/
{E_VEX_EXT, 0x6638dc18, "(e_vex ext 32)", xx, xx, xx, xx, xx, mrm, x, 32},/*52*/
{E_VEX_EXT, 0x6638dd18, "(e_vex ext 33)", xx, xx, xx, xx, xx, mrm, x, 33},/*53*/
{E_VEX_EXT, 0x6638de18, "(e_vex ext 34)", xx, xx, xx, xx, xx, mrm, x, 34},/*54*/
{E_VEX_EXT, 0x6638df18, "(e_vex ext 35)", xx, xx, xx, xx, xx, mrm, x, 35},/*55*/
/* AVX */
{E_VEX_EXT, 0x66380e18, "(e_vex ext 59)", xx, xx, xx, xx, xx, mrm, x, 59},/*56*/
{E_VEX_EXT, 0x66380f18, "(e_vex ext 60)", xx, xx, xx, xx, xx, mrm, x, 60},/*57*/
/* FMA 96-9f */
{VEX_W_EXT, 0x66389618, "(vex_W ext 6)", xx, xx, xx, xx, xx, mrm, x, 6},/*58*/
{VEX_W_EXT, 0x66389718, "(vex_W ext 9)", xx, xx, xx, xx, xx, mrm, x, 9},/*59*/
{VEX_W_EXT, 0x66389818, "(vex_W ext 0)", xx, xx, xx, xx, xx, mrm, x, 0},/*60*/
{VEX_W_EXT, 0x66389918, "(vex_W ext 3)", xx, xx, xx, xx, xx, mrm, x, 3},/*61*/
{VEX_W_EXT, 0x66389a18, "(vex_W ext 12)", xx, xx, xx, xx, xx, mrm, x, 12},/*62*/
{VEX_W_EXT, 0x66389b18, "(vex_W ext 15)", xx, xx, xx, xx, xx, mrm, x, 15},/*63*/
{VEX_W_EXT, 0x66389c18, "(vex_W ext 18)", xx, xx, xx, xx, xx, mrm, x, 18},/*64*/
{VEX_W_EXT, 0x66389d18, "(vex_W ext 21)", xx, xx, xx, xx, xx, mrm, x, 21},/*65*/
{VEX_W_EXT, 0x66389e18, "(vex_W ext 24)", xx, xx, xx, xx, xx, mrm, x, 24},/*66*/
{VEX_W_EXT, 0x66389f18, "(vex_W ext 27)", xx, xx, xx, xx, xx, mrm, x, 27},/*67*/
/* FMA a6-af */
{VEX_W_EXT, 0x6638a618, "(vex_W ext 7)", xx, xx, xx, xx, xx, mrm, x, 7},/*68*/
{VEX_W_EXT, 0x6638a718, "(vex_W ext 10)", xx, xx, xx, xx, xx, mrm, x, 10},/*69*/
{VEX_W_EXT, 0x6638a818, "(vex_W ext 1)", xx, xx, xx, xx, xx, mrm, x, 1},/*70*/
{VEX_W_EXT, 0x6638a918, "(vex_W ext 4)", xx, xx, xx, xx, xx, mrm, x, 4},/*71*/
{VEX_W_EXT, 0x6638aa18, "(vex_W ext 13)", xx, xx, xx, xx, xx, mrm, x, 13},/*72*/
{VEX_W_EXT, 0x6638ab18, "(vex_W ext 16)", xx, xx, xx, xx, xx, mrm, x, 16},/*73*/
{VEX_W_EXT, 0x6638ac18, "(vex_W ext 19)", xx, xx, xx, xx, xx, mrm, x, 19},/*74*/
{VEX_W_EXT, 0x6638ad18, "(vex_W ext 22)", xx, xx, xx, xx, xx, mrm, x, 22},/*75*/
{VEX_W_EXT, 0x6638ae18, "(vex_W ext 25)", xx, xx, xx, xx, xx, mrm, x, 25},/*76*/
{VEX_W_EXT, 0x6638af18, "(vex_W ext 28)", xx, xx, xx, xx, xx, mrm, x, 28},/*77*/
/* FMA b6-bf */
{VEX_W_EXT, 0x6638b618, "(vex_W ext 8)", xx, xx, xx, xx, xx, mrm, x, 8},/*78*/
{VEX_W_EXT, 0x6638b718, "(vex_W ext 11)", xx, xx, xx, xx, xx, mrm, x, 11},/*79*/
{VEX_W_EXT, 0x6638b818, "(vex_W ext 2)", xx, xx, xx, xx, xx, mrm, x, 2},/*80*/
{VEX_W_EXT, 0x6638b918, "(vex_W ext 5)", xx, xx, xx, xx, xx, mrm, x, 5},/*81*/
{VEX_W_EXT, 0x6638ba18, "(vex_W ext 14)", xx, xx, xx, xx, xx, mrm, x, 14},/*82*/
{VEX_W_EXT, 0x6638bb18, "(vex_W ext 17)", xx, xx, xx, xx, xx, mrm, x, 17},/*83*/
{VEX_W_EXT, 0x6638bc18, "(vex_W ext 20)", xx, xx, xx, xx, xx, mrm, x, 20},/*84*/
{VEX_W_EXT, 0x6638bd18, "(vex_W ext 23)", xx, xx, xx, xx, xx, mrm, x, 23},/*85*/
{VEX_W_EXT, 0x6638be18, "(vex_W ext 26)", xx, xx, xx, xx, xx, mrm, x, 26},/*86*/
{VEX_W_EXT, 0x6638bf18, "(vex_W ext 29)", xx, xx, xx, xx, xx, mrm, x, 29},/*87*/
/* AVX overlooked in original pass */
{E_VEX_EXT, 0x66381318, "(e_vex ext 63)", xx, xx, xx, xx, xx, mrm, x, 63},/*88*/
{E_VEX_EXT, 0x66381818, "(e_vex ext 64)", xx, xx, xx, xx, xx, mrm, x, 64},/*89*/
{E_VEX_EXT, 0x66381918, "(e_vex ext 65)", xx, xx, xx, xx, xx, mrm, x, 65},/*90*/
{E_VEX_EXT, 0x66381a18, "(e_vex ext 66)", xx, xx, xx, xx, xx, mrm, x, 66},/*91*/
{E_VEX_EXT, 0x66382c18, "(e_vex ext 67)", xx, xx, xx, xx, xx, mrm, x, 67},/*92*/
{E_VEX_EXT, 0x66382d18, "(e_vex ext 68)", xx, xx, xx, xx, xx, mrm, x, 68},/*93*/
{E_VEX_EXT, 0x66382e18, "(e_vex ext 69)", xx, xx, xx, xx, xx, mrm, x, 69},/*94*/
{E_VEX_EXT, 0x66382f18, "(e_vex ext 70)", xx, xx, xx, xx, xx, mrm, x, 70},/*95*/
{E_VEX_EXT, 0x66380c18, "(e_vex ext 77)", xx, xx, xx, xx, xx, mrm, x, 77},/*96*/
{E_VEX_EXT, 0x66380d18, "(e_vex ext 78)", xx, xx, xx, xx, xx, mrm, x, 78},/*97*/
/* TBM */
{PREFIX_EXT, 0x38f718, "(prefix ext 141)", xx, xx, xx, xx, xx, mrm, x, 141}, /*98*/
/* BMI1 */
{EXTENSION, 0x38f318, "(group 17)", By, xx, Ey, xx, xx, mrm|vex, x, 31}, /*99*/
/* marked reqp b/c it should have no prefix (prefixes are reserved for future opcodes) */
{OP_andn, 0x38f218, "andn", Gy, xx, By, Ey, xx, mrm|vex|reqp, fW6, END_LIST},/*100*/
/* BMI2 */
{PREFIX_EXT, 0x38f518, "(prefix ext 142)", xx, xx, xx, xx, xx, mrm, x, 142}, /*101*/
{PREFIX_EXT, 0x38f618, "(prefix ext 143)", xx, xx, xx, xx, xx, mrm, x, 143}, /*102*/
{OP_invpcid, 0x66388218, "invpcid", xx, xx, Gy, Mdq, xx, mrm|reqp, x, END_LIST},/*103*/
/* AVX2 */
{VEX_W_EXT, 0x66389018, "(vex_W ext 66)", xx, xx, xx, xx, xx, mrm|vex, x, 66},/*104*/
{VEX_W_EXT, 0x66389118, "(vex_W ext 67)", xx, xx, xx, xx, xx, mrm|vex, x, 67},/*105*/
{VEX_W_EXT, 0x66389218, "(vex_W ext 68)", xx, xx, xx, xx, xx, mrm|vex, x, 68},/*106*/
{VEX_W_EXT, 0x66389318, "(vex_W ext 69)", xx, xx, xx, xx, xx, mrm|vex, x, 69},/*107*/
{OP_vbroadcasti128,0x66385a18, "vbroadcasti128",Vqq,xx,Mdq,xx,xx,mrm|vex|reqp,x,END_LIST},/*108*/
{VEX_W_EXT, 0x66388c18, "(vex_W ext 70)", xx,xx,xx,xx,xx, mrm|vex|reqp, x, 70},/*109*/
{VEX_W_EXT, 0x66388e18, "(vex_W ext 71)", xx,xx,xx,xx,xx, mrm|vex|reqp, x, 71},/*110*/
/* Following Intel and not marking as packed float vs ints: just "qq". */
{OP_vpermps,0x66381618, "vpermps",Vqq,xx,Hqq,Wqq,xx, mrm|vex|reqp,x,END_LIST}, /*111*/
{OP_vpermd, 0x66383618, "vpermd", Vqq,xx,Hqq,Wqq,xx, mrm|vex|reqp,x,END_LIST}, /*112*/
{VEX_W_EXT, 0x66384518, "(vex_W ext 72)", xx,xx,xx,xx,xx, mrm|vex|reqp, x, 72},/*113*/
{OP_vpsravd,0x66384618, "vpsravd", Vx,xx,Hx,Wx,xx, mrm|vex|reqp, x, END_LIST}, /*114*/
{VEX_W_EXT, 0x66384718, "(vex_W ext 73)", xx,xx,xx,xx,xx, mrm|vex|reqp, x, 73},/*115*/
{OP_vpbroadcastb, 0x66387818, "vpbroadcastb", Vx, xx, Wb_dq, xx, xx, mrm|vex|reqp, x, END_LIST},/*116*/
{OP_vpbroadcastw, 0x66387918, "vpbroadcastw", Vx, xx, Ww_dq, xx, xx, mrm|vex|reqp, x, END_LIST},/*117*/
{OP_vpbroadcastd, 0x66385818, "vpbroadcastd", Vx, xx, Wd_dq, xx, xx, mrm|vex|reqp, x, END_LIST},/*118*/
{OP_vpbroadcastq, 0x66385918, "vpbroadcastq", Vx, xx, Wq_dq, xx, xx, mrm|vex|reqp, x, END_LIST},/*119*/
};
/* N.B.: every 0x3a instr so far has an immediate. If a version w/o an immed
* comes along we'll have to add a threebyte_3a_vex_extra[] table to decode_fast.c.
*/
const byte third_byte_3a_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
59,60,61, 0, 28,29,30, 0, 6, 7, 8, 9, 10,11,12, 1, /* 0 */
0, 0, 0, 0, 2, 3, 4, 5, 31,32, 0, 0, 0,33, 0, 0, /* 1 */
13,14,15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
63,64,65,66, 0, 0, 0, 0, 57,58, 0, 0, 0, 0, 0, 0, /* 3 */
16,17,18, 0, 23, 0,62, 0, 54,55,25,26, 27, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34,35,36,37, /* 5 */
19,20,21,22, 0, 0, 0, 0, 38,39,40,41, 42,43,44,45, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 46,47,48,49, 50,51,52,53, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,24, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
const instr_info_t third_byte_3a[] = {
{INVALID, 0x3aff18, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* 0*/
/**** SSSE3 ****/
{PREFIX_EXT, 0x3a0f18, "(prefix ext 133)", xx, xx, xx, xx, xx, mrm, x, 133}, /* 1*/
/**** SSE4 ****/
{E_VEX_EXT, 0x663a1418, "(e_vex ext 36)", xx, xx, xx, xx, xx, mrm, x, 36},/* 2*/
{E_VEX_EXT, 0x663a1518, "(e_vex ext 37)", xx, xx, xx, xx, xx, mrm, x, 37},/* 3*/
{E_VEX_EXT, 0x663a1618, "(e_vex ext 38)", xx, xx, xx, xx, xx, mrm, x, 38},/* 4*/
{E_VEX_EXT, 0x663a1718, "(e_vex ext 39)", xx, xx, xx, xx, xx, mrm, x, 39},/* 5*/
{E_VEX_EXT, 0x663a0818, "(e_vex ext 40)", xx, xx, xx, xx, xx, mrm, x, 40},/* 6*/
{E_VEX_EXT, 0x663a0918, "(e_vex ext 41)", xx, xx, xx, xx, xx, mrm, x, 41},/* 7*/
{E_VEX_EXT, 0x663a0a18, "(e_vex ext 42)", xx, xx, xx, xx, xx, mrm, x, 42},/* 8*/
{E_VEX_EXT, 0x663a0b18, "(e_vex ext 43)", xx, xx, xx, xx, xx, mrm, x, 43},/* 9*/
{E_VEX_EXT, 0x663a0c18, "(e_vex ext 44)", xx, xx, xx, xx, xx, mrm, x, 44},/*10*/
{E_VEX_EXT, 0x663a0d18, "(e_vex ext 45)", xx, xx, xx, xx, xx, mrm, x, 45},/*11*/
{E_VEX_EXT, 0x663a0e18, "(e_vex ext 46)", xx, xx, xx, xx, xx, mrm, x, 46},/*12*/
/* 20 */
{E_VEX_EXT, 0x663a2018, "(e_vex ext 47)", xx, xx, xx, xx, xx, mrm, x, 47},/*13*/
{E_VEX_EXT, 0x663a2118, "(e_vex ext 48)", xx, xx, xx, xx, xx, mrm, x, 48},/*14*/
{E_VEX_EXT, 0x663a2218, "(e_vex ext 49)", xx, xx, xx, xx, xx, mrm, x, 49},/*15*/
/* 40 */
{E_VEX_EXT, 0x663a4018, "(e_vex ext 50)", xx, xx, xx, xx, xx, mrm, x, 50},/*16*/
{E_VEX_EXT, 0x663a4118, "(e_vex ext 51)", xx, xx, xx, xx, xx, mrm, x, 51},/*17*/
{E_VEX_EXT, 0x663a4218, "(e_vex ext 52)", xx, xx, xx, xx, xx, mrm, x, 52},/*18*/
/* 60 */
{E_VEX_EXT, 0x663a6018, "(e_vex ext 53)", xx, xx, xx, xx, xx, mrm, x, 53},/*19*/
{E_VEX_EXT, 0x663a6118, "(e_vex ext 54)", xx, xx, xx, xx, xx, mrm, x, 54},/*20*/
{E_VEX_EXT, 0x663a6218, "(e_vex ext 55)", xx, xx, xx, xx, xx, mrm, x, 55},/*21*/
{E_VEX_EXT, 0x663a6318, "(e_vex ext 56)", xx, xx, xx, xx, xx, mrm, x, 56},/*22*/
{E_VEX_EXT, 0x663a4418, "(e_vex ext 57)", xx, xx, xx, xx, xx, mrm, x, 57},/*23*/
{E_VEX_EXT, 0x663adf18, "(e_vex ext 58)", xx, xx, xx, xx, xx, mrm, x, 58},/*24*/
/* AVX overlooked in original pass */
{E_VEX_EXT, 0x663a4a18, "(e_vex ext 0)", xx, xx, xx, xx, xx, mrm, x, 0},/*25*/
{E_VEX_EXT, 0x663a4b18, "(e_vex ext 1)", xx, xx, xx, xx, xx, mrm, x, 1},/*26*/
{E_VEX_EXT, 0x663a4c18, "(e_vex ext 2)", xx, xx, xx, xx, xx, mrm, x, 2},/*27*/
{E_VEX_EXT, 0x663a0418, "(e_vex ext 71)", xx, xx, xx, xx, xx, mrm, x, 71},/*28*/
{E_VEX_EXT, 0x663a0518, "(e_vex ext 72)", xx, xx, xx, xx, xx, mrm, x, 72},/*29*/
{E_VEX_EXT, 0x663a0618, "(e_vex ext 73)", xx, xx, xx, xx, xx, mrm, x, 73},/*30*/
{E_VEX_EXT, 0x663a1818, "(e_vex ext 74)", xx, xx, xx, xx, xx, mrm, x, 74},/*31*/
{E_VEX_EXT, 0x663a1918, "(e_vex ext 75)", xx, xx, xx, xx, xx, mrm, x, 75},/*32*/
{E_VEX_EXT, 0x663a1d18, "(e_vex ext 76)", xx, xx, xx, xx, xx, mrm, x, 76},/*33*/
/* FMA4 */
{VEX_W_EXT,0x663a5c18, "(vex_W ext 30)", xx, xx, xx, xx, xx, mrm, x, 30},/*34*/
{VEX_W_EXT,0x663a5d18, "(vex_W ext 31)", xx, xx, xx, xx, xx, mrm, x, 31},/*35*/
{VEX_W_EXT,0x663a5e18, "(vex_W ext 32)", xx, xx, xx, xx, xx, mrm, x, 32},/*36*/
{VEX_W_EXT,0x663a5f18, "(vex_W ext 33)", xx, xx, xx, xx, xx, mrm, x, 33},/*37*/
{VEX_W_EXT,0x663a6818, "(vex_W ext 34)", xx, xx, xx, xx, xx, mrm, x, 34},/*38*/
{VEX_W_EXT,0x663a6918, "(vex_W ext 35)", xx, xx, xx, xx, xx, mrm, x, 35},/*39*/
{VEX_W_EXT,0x663a6a18, "(vex_W ext 36)", xx, xx, xx, xx, xx, mrm, x, 36},/*40*/
{VEX_W_EXT,0x663a6b18, "(vex_W ext 37)", xx, xx, xx, xx, xx, mrm, x, 37},/*41*/
{VEX_W_EXT,0x663a6c18, "(vex_W ext 38)", xx, xx, xx, xx, xx, mrm, x, 38},/*42*/
{VEX_W_EXT,0x663a6d18, "(vex_W ext 39)", xx, xx, xx, xx, xx, mrm, x, 39},/*43*/
{VEX_W_EXT,0x663a6e18, "(vex_W ext 40)", xx, xx, xx, xx, xx, mrm, x, 40},/*44*/
{VEX_W_EXT,0x663a6f18, "(vex_W ext 41)", xx, xx, xx, xx, xx, mrm, x, 41},/*45*/
{VEX_W_EXT,0x663a7818, "(vex_W ext 42)", xx, xx, xx, xx, xx, mrm, x, 42},/*46*/
{VEX_W_EXT,0x663a7918, "(vex_W ext 43)", xx, xx, xx, xx, xx, mrm, x, 43},/*47*/
{VEX_W_EXT,0x663a7a18, "(vex_W ext 44)", xx, xx, xx, xx, xx, mrm, x, 44},/*48*/
{VEX_W_EXT,0x663a7b18, "(vex_W ext 45)", xx, xx, xx, xx, xx, mrm, x, 45},/*49*/
{VEX_W_EXT,0x663a7c18, "(vex_W ext 46)", xx, xx, xx, xx, xx, mrm, x, 46},/*50*/
{VEX_W_EXT,0x663a7d18, "(vex_W ext 47)", xx, xx, xx, xx, xx, mrm, x, 47},/*51*/
{VEX_W_EXT,0x663a7e18, "(vex_W ext 48)", xx, xx, xx, xx, xx, mrm, x, 48},/*52*/
{VEX_W_EXT,0x663a7f18, "(vex_W ext 49)", xx, xx, xx, xx, xx, mrm, x, 49},/*53*/
/* XOP */
{VEX_W_EXT,0x663a4818, "(vex_W ext 64)", xx, xx, xx, xx, xx, mrm, x, 64},/*54*/
{VEX_W_EXT,0x663a4918, "(vex_W ext 65)", xx, xx, xx, xx, xx, mrm, x, 65},/*55*/
/* BMI2 */
{OP_rorx, 0xf23af018, "rorx", Gy, xx, Ey, Ib, xx, mrm|vex|reqp, x, END_LIST},/*56*/
/* AVX2 */
{OP_vinserti128,0x663a3818,"vinserti128",Vqq,xx,Hqq,Wdq,Ib,mrm|vex|reqp,x,END_LIST},/*57*/
{OP_vextracti128,0x663a3918,"vextracti128",Wdq,xx,Vqq,Ib,xx,mrm|vex|reqp,x,END_LIST},/*58*/
{OP_vpermq, 0x663a0058, "vpermq", Vqq,xx,Wqq,Ib,xx,mrm|vex|reqp,x,END_LIST},/*59*/
/* Following Intel and not marking as packed float vs ints: just "qq". */
{OP_vpermpd,0x663a0158, "vpermpd",Vqq,xx,Wqq,Ib,xx,mrm|vex|reqp,x,END_LIST},/*60*/
{OP_vpblendd,0x663a0218,"vpblendd",Vx,xx,Hx,Wx,Ib, mrm|vex|reqp,x,END_LIST},/*61*/
{OP_vperm2i128,0x663a4618,"vperm2i128",Vqq,xx,Hqq,Wqq,Ib, mrm|vex|reqp,x,END_LIST},/*62*/
/* AVX-512, VEX prefix. */
{VEX_W_EXT,0x660f3010, "(vex_W ext 102)", xx, xx, xx, xx, xx, mrm|vex|reqp, x, 102},/*63*/
{VEX_W_EXT,0x660f3110, "(vex_W ext 103)", xx, xx, xx, xx, xx, mrm|vex|reqp, x, 103},/*64*/
{VEX_W_EXT,0x660f3210, "(vex_W ext 100)", xx, xx, xx, xx, xx, mrm|vex|reqp, x, 100},/*65*/
{VEX_W_EXT,0x660f3310, "(vex_W ext 101)", xx, xx, xx, xx, xx, mrm|vex|reqp, x, 101},/*66*/
};
/****************************************************************************
* Instructions that differ depending on vex.W
* Index is vex.W value
*/
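/* Index 0 is the vex.W=0 form and index 1 the vex.W=1 form.  E.g., 0f 38 98 is
 * vfmadd132ps with W=0 but vfmadd132pd with W=1 (see vex_W_ext 0 below).
 */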
const instr_info_t vex_W_extensions[][2] = {
{ /* vex_W_ext 0 */
{OP_vfmadd132ps,0x66389818,"vfmadd132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmadd132pd,0x66389858,"vfmadd132pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 1 */
{OP_vfmadd213ps,0x6638a818,"vfmadd213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmadd213pd,0x6638a858,"vfmadd213pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 2 */
{OP_vfmadd231ps,0x6638b818,"vfmadd231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmadd231pd,0x6638b858,"vfmadd231pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 3 */
{OP_vfmadd132ss,0x66389918,"vfmadd132ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfmadd132sd,0x66389958,"vfmadd132sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 4 */
{OP_vfmadd213ss,0x6638a918,"vfmadd213ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfmadd213sd,0x6638a958,"vfmadd213sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 5 */
{OP_vfmadd231ss,0x6638b918,"vfmadd231ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfmadd231sd,0x6638b958,"vfmadd231sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 6 */
{OP_vfmaddsub132ps,0x66389618,"vfmaddsub132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmaddsub132pd,0x66389658,"vfmaddsub132pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 7 */
{OP_vfmaddsub213ps,0x6638a618,"vfmaddsub213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmaddsub213pd,0x6638a658,"vfmaddsub213pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 8 */
{OP_vfmaddsub231ps,0x6638b618,"vfmaddsub231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmaddsub231pd,0x6638b658,"vfmaddsub231pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 9 */
{OP_vfmsubadd132ps,0x66389718,"vfmsubadd132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmsubadd132pd,0x66389758,"vfmsubadd132pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 10 */
{OP_vfmsubadd213ps,0x6638a718,"vfmsubadd213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmsubadd213pd,0x6638a758,"vfmsubadd213pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 11 */
{OP_vfmsubadd231ps,0x6638b718,"vfmsubadd231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmsubadd231pd,0x6638b758,"vfmsubadd231pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 12 */
{OP_vfmsub132ps,0x66389a18,"vfmsub132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmsub132pd,0x66389a58,"vfmsub132pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 13 */
{OP_vfmsub213ps,0x6638aa18,"vfmsub213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmsub213pd,0x6638aa58,"vfmsub213pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 14 */
{OP_vfmsub231ps,0x6638ba18,"vfmsub231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfmsub231pd,0x6638ba58,"vfmsub231pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 15 */
{OP_vfmsub132ss,0x66389b18,"vfmsub132ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfmsub132sd,0x66389b58,"vfmsub132sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 16 */
{OP_vfmsub213ss,0x6638ab18,"vfmsub213ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfmsub213sd,0x6638ab58,"vfmsub213sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 17 */
{OP_vfmsub231ss,0x6638bb18,"vfmsub231ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfmsub231sd,0x6638bb58,"vfmsub231sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 18 */
{OP_vfnmadd132ps,0x66389c18,"vfnmadd132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfnmadd132pd,0x66389c58,"vfnmadd132pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 19 */
{OP_vfnmadd213ps,0x6638ac18,"vfnmadd213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfnmadd213pd,0x6638ac58,"vfnmadd213pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 20 */
{OP_vfnmadd231ps,0x6638bc18,"vfnmadd231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfnmadd231pd,0x6638bc58,"vfnmadd231pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 21 */
{OP_vfnmadd132ss,0x66389d18,"vfnmadd132ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfnmadd132sd,0x66389d58,"vfnmadd132sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 22 */
{OP_vfnmadd213ss,0x6638ad18,"vfnmadd213ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfnmadd213sd,0x6638ad58,"vfnmadd213sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 23 */
{OP_vfnmadd231ss,0x6638bd18,"vfnmadd231ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfnmadd231sd,0x6638bd58,"vfnmadd231sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 24 */
{OP_vfnmsub132ps,0x66389e18,"vfnmsub132ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfnmsub132pd,0x66389e58,"vfnmsub132pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 25 */
{OP_vfnmsub213ps,0x6638ae18,"vfnmsub213ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfnmsub213pd,0x6638ae58,"vfnmsub213pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 26 */
{OP_vfnmsub231ps,0x6638be18,"vfnmsub231ps",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
{OP_vfnmsub231pd,0x6638be58,"vfnmsub231pd",Vvs,xx,Hvs,Wvs,Vvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 27 */
{OP_vfnmsub132ss,0x66389f18,"vfnmsub132ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfnmsub132sd,0x66389f58,"vfnmsub132sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 28 */
{OP_vfnmsub213ss,0x6638af18,"vfnmsub213ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfnmsub213sd,0x6638af58,"vfnmsub213sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 29 */
{OP_vfnmsub231ss,0x6638bf18,"vfnmsub231ss",Vss,xx,Hss,Wss,Vss,mrm|vex|reqp,x,END_LIST},
{OP_vfnmsub231sd,0x6638bf58,"vfnmsub231sd",Vsd,xx,Hsd,Wsd,Vsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 30 */
{OP_vfmaddsubps,0x663a5c18,"vfmaddsubps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[30][1]},
{OP_vfmaddsubps,0x663a5c58,"vfmaddsubps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 31 */
{OP_vfmaddsubpd,0x663a5d18,"vfmaddsubpd",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[31][1]},
{OP_vfmaddsubpd,0x663a5d58,"vfmaddsubpd",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 32 */
{OP_vfmsubaddps,0x663a5e18,"vfmsubaddps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[32][1]},
{OP_vfmsubaddps,0x663a5e58,"vfmsubaddps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 33 */
{OP_vfmsubaddpd,0x663a5f18,"vfmsubaddpd",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[33][1]},
{OP_vfmsubaddpd,0x663a5f58,"vfmsubaddpd",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 34 */
{OP_vfmaddps,0x663a6818,"vfmaddps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[34][1]},
{OP_vfmaddps,0x663a6858,"vfmaddps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 35 */
{OP_vfmaddpd,0x663a6918,"vfmaddpd",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[35][1]},
{OP_vfmaddpd,0x663a6958,"vfmaddpd",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 36 */
{OP_vfmaddss,0x663a6a18,"vfmaddss",Vdq,xx,Lss,Wss,Hss,mrm|vex|reqp,x,tvexw[36][1]},
{OP_vfmaddss,0x663a6a58,"vfmaddss",Vdq,xx,Lss,Hss,Wss,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 37 */
{OP_vfmaddsd,0x663a6b18,"vfmaddsd",Vdq,xx,Lsd,Wsd,Hsd,mrm|vex|reqp,x,tvexw[37][1]},
{OP_vfmaddsd,0x663a6b58,"vfmaddsd",Vdq,xx,Lsd,Hsd,Wsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 38 */
{OP_vfmsubps,0x663a6c18,"vfmsubps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[38][1]},
{OP_vfmsubps,0x663a6c58,"vfmsubps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 39 */
{OP_vfmsubpd,0x663a6d18,"vfmsubpd",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[39][1]},
{OP_vfmsubpd,0x663a6d58,"vfmsubpd",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 40 */
{OP_vfmsubss,0x663a6e18,"vfmsubss",Vdq,xx,Lss,Wss,Hss,mrm|vex|reqp,x,tvexw[40][1]},
{OP_vfmsubss,0x663a6e58,"vfmsubss",Vdq,xx,Lss,Hss,Wss,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 41 */
{OP_vfmsubsd,0x663a6f18,"vfmsubsd",Vdq,xx,Lsd,Wsd,Hsd,mrm|vex|reqp,x,tvexw[41][1]},
{OP_vfmsubsd,0x663a6f58,"vfmsubsd",Vdq,xx,Lsd,Hsd,Wsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 42 */
{OP_vfnmaddps,0x663a7818,"vfnmaddps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[42][1]},
{OP_vfnmaddps,0x663a7858,"vfnmaddps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 43 */
{OP_vfnmaddpd,0x663a7918,"vfnmaddpd",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[43][1]},
{OP_vfnmaddpd,0x663a7958,"vfnmaddpd",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 44 */
{OP_vfnmaddss,0x663a7a18,"vfnmaddss",Vdq,xx,Lss,Wss,Hss,mrm|vex|reqp,x,tvexw[44][1]},
{OP_vfnmaddss,0x663a7a58,"vfnmaddss",Vdq,xx,Lss,Hss,Wss,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 45 */
{OP_vfnmaddsd,0x663a7b18,"vfnmaddsd",Vdq,xx,Lsd,Wsd,Hsd,mrm|vex|reqp,x,tvexw[45][1]},
{OP_vfnmaddsd,0x663a7b58,"vfnmaddsd",Vdq,xx,Lsd,Hsd,Wsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 46 */
{OP_vfnmsubps,0x663a7c18,"vfnmsubps",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[46][1]},
{OP_vfnmsubps,0x663a7c58,"vfnmsubps",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 47 */
{OP_vfnmsubpd,0x663a7d18,"vfnmsubpd",Vvs,xx,Lvs,Wvs,Hvs,mrm|vex|reqp,x,tvexw[47][1]},
{OP_vfnmsubpd,0x663a7d58,"vfnmsubpd",Vvs,xx,Lvs,Hvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 48 */
{OP_vfnmsubss,0x663a7e18,"vfnmsubss",Vdq,xx,Lss,Wss,Hss,mrm|vex|reqp,x,tvexw[48][1]},
{OP_vfnmsubss,0x663a7e58,"vfnmsubss",Vdq,xx,Lss,Hss,Wss,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 49 */
{OP_vfnmsubsd,0x663a7f18,"vfnmsubsd",Vdq,xx,Lsd,Wsd,Hsd,mrm|vex|reqp,x,tvexw[49][1]},
{OP_vfnmsubsd,0x663a7f58,"vfnmsubsd",Vdq,xx,Lsd,Hsd,Wsd,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 50 */
{OP_vpcmov, 0x08a218,"vpcmov", Vvs,xx,Hvs,Wvs,Lvs,mrm|vex,x,tvexw[50][1]},
{OP_vpcmov, 0x08a258,"vpcmov", Vvs,xx,Hvs,Lvs,Wvs,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 51 */
{OP_vpperm, 0x08a318,"vpperm", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,tvexw[51][1]},
{OP_vpperm, 0x08a358,"vpperm", Vdq,xx,Hdq,Ldq,Wdq,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 52 */
{OP_vprotb, 0x099018,"vprotb", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[52][1]},
{OP_vprotb, 0x099058,"vprotb", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 53 */
{OP_vprotw, 0x099118,"vprotw", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[53][1]},
{OP_vprotw, 0x099158,"vprotw", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 54 */
{OP_vprotd, 0x099218,"vprotd", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[54][1]},
{OP_vprotd, 0x099258,"vprotd", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 55 */
{OP_vprotq, 0x099318,"vprotq", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[55][1]},
{OP_vprotq, 0x099358,"vprotq", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 56 */
{OP_vpshlb, 0x099418,"vpshlb", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[56][1]},
{OP_vpshlb, 0x099458,"vpshlb", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 57 */
{OP_vpshlw, 0x099518,"vpshlw", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[57][1]},
{OP_vpshlw, 0x099558,"vpshlw", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 58 */
{OP_vpshld, 0x099618,"vpshld", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[58][1]},
{OP_vpshld, 0x099658,"vpshld", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 59 */
{OP_vpshlq, 0x099718,"vpshlq", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[59][1]},
{OP_vpshlq, 0x099758,"vpshlq", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 60 */
{OP_vpshab, 0x099818,"vpshab", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[60][1]},
{OP_vpshab, 0x099858,"vpshab", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 61 */
{OP_vpshaw, 0x099918,"vpshaw", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[61][1]},
{OP_vpshaw, 0x099958,"vpshaw", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 62 */
{OP_vpshad, 0x099a18,"vpshad", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[62][1]},
{OP_vpshad, 0x099a58,"vpshad", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 63 */
{OP_vpshaq, 0x099b18,"vpshaq", Vdq,xx,Wdq,Hdq,xx,mrm|vex,x,tvexw[63][1]},
{OP_vpshaq, 0x099b58,"vpshaq", Vdq,xx,Hdq,Wdq,xx,mrm|vex,x,END_LIST},
}, { /* vex_W_ext 64 */
{OP_vpermil2ps,0x663a4818,"vpermil2ps",Vvs,xx,Hvs,Wvs,Lvs,mrm|vex|reqp,x,tvexw[64][1]},
{OP_vpermil2ps,0x663a4858,"vpermil2ps",Vvs,xx,Hvs,Lvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 65 */
{OP_vpermil2pd,0x663a4918,"vpermil2pd",Vvs,xx,Hvs,Wvs,Lvs,mrm|vex|reqp,x,tvexw[65][1]},
{OP_vpermil2pd,0x663a4958,"vpermil2pd",Vvs,xx,Hvs,Lvs,Wvs,mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 66 */
/* XXX: OP_v*gather* raise #UD if any pair of the index, mask, or destination
* registers are identical. We don't bother trying to detect that.
*/
{OP_vpgatherdd,0x66389018,"vpgatherdd",Vx,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vpgatherdq,0x66389058,"vpgatherdq",Vx,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 67 */
{OP_vpgatherqd,0x66389118,"vpgatherqd",Vx,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vpgatherqq,0x66389158,"vpgatherqq",Vx,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 68 */
{OP_vgatherdps,0x66389218,"vgatherdps",Vvs,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vgatherdpd,0x66389258,"vgatherdpd",Vvd,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 69 */
{OP_vgatherqps,0x66389318,"vgatherqps",Vvs,Hx,MVd,Hx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vgatherqpd,0x66389358,"vgatherqpd",Vvd,Hx,MVq,Hx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 70 */
{OP_vpmaskmovd,0x66388c18,"vpmaskmovd",Vx,xx,Hx,Mx,xx, mrm|vex|reqp|predcx,x,tvexw[71][0]},
{OP_vpmaskmovq,0x66388c58,"vpmaskmovq",Vx,xx,Hx,Mx,xx, mrm|vex|reqp|predcx,x,tvexw[71][1]},
}, { /* vex_W_ext 71 */
/* Conditional store => predcx */
{OP_vpmaskmovd,0x66388e18,"vpmaskmovd",Mx,xx,Vx,Hx,xx, mrm|vex|reqp|predcx,x,END_LIST},
{OP_vpmaskmovq,0x66388e58,"vpmaskmovq",Mx,xx,Vx,Hx,xx, mrm|vex|reqp|predcx,x,END_LIST},
}, { /* vex_W_ext 72 */
{OP_vpsrlvd,0x66384518,"vpsrlvd",Vx,xx,Hx,Wx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vpsrlvq,0x66384558,"vpsrlvq",Vx,xx,Hx,Wx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 73 */
{OP_vpsllvd,0x66384718,"vpsllvd",Vx,xx,Hx,Wx,xx, mrm|vex|reqp,x,END_LIST},
{OP_vpsllvq,0x66384758,"vpsllvq",Vx,xx,Hx,Wx,xx, mrm|vex|reqp,x,END_LIST},
}, { /* vex_W_ext 74 */
{OP_kmovw,0x0f9010,"kmovw",KPw,xx,KQw,xx,xx, mrm|vex,x,tvexw[76][0]},
{OP_kmovq,0x0f9050,"kmovq",KPq,xx,KQq,xx,xx, mrm|vex,x,tvexw[76][1]},
}, { /* vex_W_ext 75 */
{OP_kmovb,0x660f9010,"kmovb",KPb,xx,KQb,xx,xx, mrm|vex,x,tvexw[77][0]},
{OP_kmovd,0x660f9050,"kmovd",KPd,xx,KQd,xx,xx, mrm|vex,x,tvexw[77][1]},
}, { /* vex_W_ext 76 */
{OP_kmovw,0x0f9110,"kmovw",KQw,xx,KPw,xx,xx, mrm|vex,x,tvexw[78][0]},
{OP_kmovq,0x0f9150,"kmovq",KQq,xx,KPq,xx,xx, mrm|vex,x,tvexw[106][1]},
}, { /* vex_W_ext 77 */
{OP_kmovb,0x660f9110,"kmovb",KQb,xx,KPb,xx,xx, mrm|vex,x,tvexw[79][0]},
{OP_kmovd,0x660f9150,"kmovd",KQd,xx,KPd,xx,xx, mrm|vex,x,tvexw[106][0]},
}, { /* vex_W_ext 78 */
{OP_kmovw,0x0f9210,"kmovw",KPw,xx,Ry,xx,xx, mrm|vex,x,tvexw[80][0]},
{INVALID, 0x0f9250,"(bad)", xx,xx,xx,xx,xx, no,x,NA},
}, { /* vex_W_ext 79 */
{OP_kmovb,0x660f9210,"kmovb",KPb,xx,Ry,xx,xx, mrm|vex,x,tvexw[81][0]},
{INVALID, 0x660f9250,"(bad)", xx,xx,xx,xx,xx, no,x,NA},
}, { /* vex_W_ext 80 */
{OP_kmovw,0x0f9310,"kmovw", Gd,xx,KRw,xx,xx, mrm|vex,x,END_LIST},
{INVALID, 0x0f9350,"(bad)", xx,xx,xx,xx,xx, no,x,NA},
}, { /* vex_W_ext 81 */
{OP_kmovb,0x660f9310,"kmovb",Gd,xx,KRb,xx,xx, mrm|vex,x,END_LIST},
{INVALID, 0x660f9350,"(bad)",xx,xx,xx,xx,xx, no,x,NA},
}, { /* vex_W_ext 82 */
{OP_kandw,0x0f4110,"kandw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_kandq,0x0f4150,"kandq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 83 */
{OP_kandb,0x660f4110,"kandb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kandd,0x660f4150,"kandd",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 84 */
{OP_kandnw,0x0f4210,"kandnw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_kandnq,0x0f4250,"kandnq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 85 */
{OP_kandnb,0x660f4210,"kandnb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kandnd,0x660f4250,"kandnd",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 86 */
{OP_kunpckwd,0x0f4b10,"kunpckwd",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
{OP_kunpckdq,0x0f4b50,"kunpckdq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 87 */
{OP_kunpckbw,0x660f4b10,"kunpckbw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{INVALID, 0x660f4b50, "(bad)", xx,xx, xx, xx,xx, no,x,NA},
}, { /* vex_W_ext 88 */
{OP_knotw,0x0f4410,"knotw",KPw,xx,KRw,xx,xx, mrm|vex,x,END_LIST},
{OP_knotq,0x0f4450,"knotq",KPq,xx,KRq,xx,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 89 */
{OP_knotb,0x660f4410,"knotb",KPb,xx,KRb,xx,xx, mrm|vex,x,END_LIST},
{OP_knotd,0x660f4450,"knotd",KPd,xx,KRd,xx,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 90 */
{OP_korw,0x0f4510,"korw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_korq,0x0f4550,"korq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 91 */
{OP_korb,0x660f4510,"korb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kord,0x660f4550,"kord",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 92 */
{OP_kxnorw,0x0f4610,"kxnorw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_kxnorq,0x0f4650,"kxnorq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 93 */
{OP_kxnorb,0x660f4610,"kxnorb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kxnord,0x660f4650,"kxnord",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 94 */
{OP_kxorw,0x0f4710,"kxorw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_kxorq,0x0f4750,"kxorq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 95 */
{OP_kxorb,0x660f4710,"kxorb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kxord,0x660f4750,"kxord",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 96 */
{OP_kaddw,0x0f4a10,"kaddw",KPw,xx,KVw,KRw,xx, mrm|vex,x,END_LIST},
{OP_kaddq,0x0f4a50,"kaddq",KPq,xx,KVq,KRq,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 97 */
{OP_kaddb,0x660f4a10,"kaddb",KPb,xx,KVb,KRb,xx, mrm|vex,x,END_LIST},
{OP_kaddd,0x660f4a50,"kaddd",KPd,xx,KVd,KRd,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 98 */
{OP_kortestw,0x0f9810,"kortestw",KPw,xx,KRw,xx,xx, mrm|vex,(fWC|fWZ),END_LIST},
{OP_kortestq,0x0f9850,"kortestq",KPq,xx,KRq,xx,xx, mrm|vex,(fWC|fWZ),END_LIST},
}, { /* vex_W_ext 99 */
{OP_kortestb,0x660f9810,"kortestb",KPb,xx,KRb,xx,xx, mrm|vex,(fWC|fWZ),END_LIST},
{OP_kortestd,0x660f9850,"kortestd",KPd,xx,KRd,xx,xx, mrm|vex,(fWC|fWZ),END_LIST},
}, { /* vex_W_ext 100 */
{OP_kshiftlb,0x663a3208,"kshiftlb",KPb,xx,KRb,Ib,xx, mrm|vex,x,END_LIST},
{OP_kshiftlw,0x663a3248,"kshiftlw",KPw,xx,KRw,Ib,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 101 */
{OP_kshiftld,0x663a3308,"kshiftld",KPd,xx,KRd,Ib,xx, mrm|vex,x,END_LIST},
{OP_kshiftlq,0x663a3348,"kshiftlq",KPq,xx,KRq,Ib,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 102 */
{OP_kshiftrb,0x663a3008,"kshiftrb",KPb,xx,KRb,Ib,xx, mrm|vex,x,END_LIST},
{OP_kshiftrw,0x663a3048,"kshiftrw",KPw,xx,KRw,Ib,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 103 */
{OP_kshiftrd,0x663a3108,"kshiftrd",KPd,xx,KRd,Ib,xx, mrm|vex,x,END_LIST},
{OP_kshiftrq,0x663a3148,"kshiftrq",KPq,xx,KRq,Ib,xx, mrm|vex,x,END_LIST},
}, { /* vex_W_ext 104 */
{OP_ktestw,0x0f9910,"ktestw",KPw,xx,KRw,xx,xx, mrm|vex,fW6,END_LIST},
{OP_ktestq,0x0f9950,"ktestq",KPq,xx,KRq,xx,xx, mrm|vex,fW6,END_LIST},
}, { /* vex_W_ext 105 */
{OP_ktestb,0x660f9910,"ktestb",KPb,xx,KRb,xx,xx, mrm|vex,fW6,END_LIST},
{OP_ktestd,0x660f9950,"ktestd",KPd,xx,KRd,xx,xx, mrm|vex,fW6,END_LIST},
}, { /* vex_W_ext 106 */
{OP_kmovd,0xf20f9210,"kmovd",KPd,xx,Ry,xx,xx, mrm|vex,x,tvexw[107][0]},
{OP_kmovq,0xf20f9250,"kmovq",KPq,xx,Ry,xx,xx, mrm|vex,x,tvexw[107][1]},
}, { /* vex_W_ext 107 */
{OP_kmovd,0xf20f9310,"kmovd", Gd,xx,KRd,xx,xx, mrm|vex,x,END_LIST},
{OP_kmovq,0xf20f9350,"kmovq",Gd_q,xx,KRq,xx,xx, mrm|vex,x,END_LIST},
},
};
/****************************************************************************
* Instructions that differ depending on evex.W.
* Index is evex.W value
*/
const instr_info_t evex_W_extensions[][2] = {
{ /* evex_W_ext 0 */
{OP_vmovups, 0x0f1010,"vmovups", Ves,xx,KEd,Wes,xx,mrm|evex,x,tevexw[1][0]},
{INVALID, 0x0f1050,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
},
{ /* evex_W_ext 1 */
{OP_vmovups, 0x0f1110,"vmovups", Wes,xx,KEd,Ves,xx,mrm|evex,x,END_LIST},
{INVALID, 0x0f1150,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
},
{ /* evex_W_ext 2 */
{INVALID, 0x660f1010,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovupd, 0x660f1050,"vmovupd", Ved,xx,KEd,Wed,xx,mrm|evex,x,tevexw[3][1]},
},
{ /* evex_W_ext 3 */
{INVALID, 0x660f1110,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovupd, 0x660f1150,"vmovupd", Wed,xx,KEd,Ved,xx,mrm|evex,x,END_LIST},
},
{ /* evex_W_ext 4 */
{OP_vmovaps, 0x0f2810,"vmovaps", Ves,xx,KEd,Wes,xx,mrm|evex,x,tevexw[5][0]},
{INVALID, 0x0f2850,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
},
{ /* evex_W_ext 5 */
{OP_vmovaps, 0x0f2910,"vmovaps", Wes,xx,KEd,Ves,xx,mrm|evex,x,END_LIST},
{INVALID, 0x0f2950,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
},
{ /* evex_W_ext 6 */
{INVALID, 0x660f2810,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovapd, 0x660f2850,"vmovapd", Ved,xx,KEd,Wed,xx,mrm|evex,x,tevexw[7][1]},
},
{ /* evex_W_ext 7 */
{INVALID, 0x660f2910,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
{OP_vmovapd, 0x660f2950,"vmovapd", Wed,xx,KEd,Ved,xx,mrm|evex,x,END_LIST},
},
{ /* evex_W_ext 8 */
{OP_vmovdqa32, 0x660f6f10,"vmovdqa32",Vex,xx,KEw,Wex,xx,mrm|evex,x,tevexw[9][0]},
{OP_vmovdqa64, 0x660f6f50,"vmovdqa64",Vex,xx,KEw,Wex,xx,mrm|evex,x,tevexw[9][1]},
},
{ /* evex_W_ext 9 */
{OP_vmovdqa32, 0x660f7f10,"vmovdqa32",Wex,xx,KEw,Vex,xx,mrm|evex,x,END_LIST},
{OP_vmovdqa64, 0x660f7f50,"vmovdqa64",Wex,xx,KEw,Vex,xx,mrm|evex,x,END_LIST},
},
{ /* evex_W_ext 10 */
{OP_vmovdqu8, 0xf20f6f10,"vmovdqu8",Vex,xx,KEw,Wex,xx,mrm|evex,x,tevexw[12][0]},
{OP_vmovdqu16, 0xf20f6f50,"vmovdqu16",Vex,xx,KEw,Wex,xx,mrm|evex,x,tevexw[12][1]},
},
{ /* evex_W_ext 11 */
{OP_vmovdqu32, 0xf30f6f10,"vmovdqu32",Vex,xx,KEw,Wex,xx,mrm|evex,x,tevexw[13][0]},
{OP_vmovdqu64, 0xf30f6f50,"vmovdqu64",Vex,xx,KEw,Wex,xx,mrm|evex,x,tevexw[13][1]},
},
{ /* evex_W_ext 12 */
{OP_vmovdqu8, 0xf20f7f10,"vmovdqu8",Wex,xx,KEw,Vex,xx,mrm|evex,x,END_LIST},
{OP_vmovdqu16, 0xf20f7f50,"vmovdqu16",Wex,xx,KEw,Vex,xx,mrm|evex,x,END_LIST},
},
{ /* evex_W_ext 13 */
{OP_vmovdqu32, 0xf30f7f10,"vmovdqu32",Wex,xx,KEw,Vex,xx,mrm|evex,x,END_LIST},
{OP_vmovdqu64, 0xf30f7f50,"vmovdqu64",Wex,xx,KEw,Vex,xx,mrm|evex,x,END_LIST},
},
{ /* evex_W_ext 14 */
{OP_vmovlps, 0x0f1210, "vmovlps", Vq_dq, xx, Hq_dq, Wq_dq, xx, mrm|evex|reqL0, x, tevexw[15][0]}, /*"vmovhlps" if reg-reg */
{INVALID, 0x0f1250,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
},
{ /* evex_W_ext 15 */
{OP_vmovlps, 0x0f1310, "vmovlps", Mq, xx, Vq_dq, xx, xx, mrm|evex, x, END_LIST},
{INVALID, 0x0f1350,"(bad)", xx,xx,xx,xx,xx,no,x,NA},
},
};
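/* Illustrative sketch only, not used by the decoder itself: once a table entry
 * of type EVEX_W_EXT has supplied a row number into evex_W_extensions above,
 * the final instr_info_t is picked by a two-element index on the decoded
 * EVEX.W bit (column 0 = the W0 form, column 1 = the W1 form).  The function
 * and parameter names below are hypothetical.
 */
static inline const instr_info_t *
example_evex_w_select(int row, int evex_w)
{
    return &evex_W_extensions[row][evex_w == 0 ? 0 : 1];
}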
/****************************************************************************
* XOP instructions
*
* Since large parts of the opcode space are empty, we save space by having
* tables of 256 indices instead of tables of 256 instr_info_t structs.
*/
/* N.B.: all XOP 0x08 are assumed to have an immediate. If this becomes
* untrue we'll have to add an xop_8_extra[] table in decode_fast.c.
*/
const byte xop_8_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 4, 5, /* 8 */
0, 0, 0, 0, 0, 6, 7, 8, 0, 0, 0, 0, 0, 0, 9,10, /* 9 */
0, 0,11,12, 0, 0,13, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0,14, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
15,16,17,18, 0, 0, 0, 0, 0, 0, 0, 0, 19,20,21,22, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23,24,25,26, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
const byte xop_9_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
0,58,59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
0, 0,61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
27,28,29,30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
31,32,33,34, 35,36,37,38, 39,40,41,42, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
0,43,44,45, 0, 0,46,47, 0, 0, 0,48, 0, 0, 0, 0, /* C */
0,49,50,51, 0, 0,52,53, 0, 0, 0,54, 0, 0, 0, 0, /* D */
0,55,56,57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
/* N.B.: nothing here for initial XOP but upcoming TBM and LWP have opcodes here */
const byte xop_a_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
60, 0,62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
const instr_info_t xop_extensions[] = {
{INVALID, 0x000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* 0*/
/* We are out of flags, and we want to share a lot of REQUIRES_VEX, so to
* distinguish XOP we just rely on the XOP.map_select being disjoint from
* the VEX.m-mmm field.
*/
/* XOP.map_select = 0x08 */
{OP_vpmacssww, 0x088518,"vpmacssww", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 1*/
{OP_vpmacsswd, 0x088618,"vpmacsswd", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 2*/
{OP_vpmacssdql,0x088718,"vpmacssdql",Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 3*/
{OP_vpmacssdd, 0x088e18,"vpmacssdd", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 4*/
{OP_vpmacssdqh,0x088f18,"vpmacssdqh",Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 5*/
{OP_vpmacsww, 0x089518,"vpmacsww", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 6*/
{OP_vpmacswd, 0x089618,"vpmacswd", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 7*/
{OP_vpmacsdql, 0x089718,"vpmacsdql", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 8*/
{OP_vpmacsdd, 0x089e18,"vpmacsdd", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /* 9*/
{OP_vpmacsdqh, 0x089f18,"vpmacsdqh", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /*10*/
{VEX_W_EXT, 0x08a218, "(vex_W ext 50)", xx,xx,xx,xx,xx, mrm|vex, x, 50}, /*11*/
{VEX_W_EXT, 0x08a318, "(vex_W ext 51)", xx,xx,xx,xx,xx, mrm|vex, x, 51}, /*12*/
{OP_vpmadcsswd,0x08a618,"vpmadcsswd",Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /*13*/
{OP_vpmadcswd, 0x08b618,"vpmadcswd", Vdq,xx,Hdq,Wdq,Ldq,mrm|vex,x,END_LIST}, /*14*/
{OP_vprotb, 0x08c018,"vprotb", Vdq,xx,Wdq,Ib,xx,mrm|vex,x,tvexw[52][0]},/*15*/
{OP_vprotw, 0x08c118,"vprotw", Vdq,xx,Wdq,Ib,xx,mrm|vex,x,tvexw[53][0]},/*16*/
{OP_vprotd, 0x08c218,"vprotd", Vdq,xx,Wdq,Ib,xx,mrm|vex,x,tvexw[54][0]},/*17*/
{OP_vprotq, 0x08c318,"vprotq", Vdq,xx,Wdq,Ib,xx,mrm|vex,x,tvexw[55][0]},/*18*/
{OP_vpcomb, 0x08cc18,"vpcomb", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*19*/
{OP_vpcomw, 0x08cd18,"vpcomw", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*20*/
{OP_vpcomd, 0x08ce18,"vpcomd", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*21*/
{OP_vpcomq, 0x08cf18,"vpcomq", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*22*/
{OP_vpcomub, 0x08ec18,"vpcomub", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*23*/
{OP_vpcomuw, 0x08ed18,"vpcomuw", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*24*/
{OP_vpcomud, 0x08ee18,"vpcomud", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*25*/
{OP_vpcomuq, 0x08ef18,"vpcomuq", Vdq,xx,Hdq,Wdq,Ib,mrm|vex,x,END_LIST}, /*26*/
/* XOP.map_select = 0x09 */
{OP_vfrczps, 0x098018,"vfrczps", Vvs,xx,Wvs,xx,xx,mrm|vex,x,END_LIST}, /*27*/
{OP_vfrczpd, 0x098118,"vfrczpd", Vvs,xx,Wvs,xx,xx,mrm|vex,x,END_LIST}, /*28*/
{OP_vfrczss, 0x098218,"vfrczss", Vss,xx,Wss,xx,xx,mrm|vex,x,END_LIST}, /*29*/
{OP_vfrczsd, 0x098318,"vfrczsd", Vsd,xx,Wsd,xx,xx,mrm|vex,x,END_LIST}, /*30*/
{VEX_W_EXT, 0x099018, "(vex_W ext 52)", xx,xx,xx,xx,xx, mrm|vex, x, 52}, /*31*/
{VEX_W_EXT, 0x099118, "(vex_W ext 53)", xx,xx,xx,xx,xx, mrm|vex, x, 53}, /*32*/
{VEX_W_EXT, 0x099218, "(vex_W ext 54)", xx,xx,xx,xx,xx, mrm|vex, x, 54}, /*33*/
{VEX_W_EXT, 0x099318, "(vex_W ext 55)", xx,xx,xx,xx,xx, mrm|vex, x, 55}, /*34*/
{VEX_W_EXT, 0x099418, "(vex_W ext 56)", xx,xx,xx,xx,xx, mrm|vex, x, 56}, /*35*/
{VEX_W_EXT, 0x099518, "(vex_W ext 57)", xx,xx,xx,xx,xx, mrm|vex, x, 57}, /*36*/
{VEX_W_EXT, 0x099618, "(vex_W ext 58)", xx,xx,xx,xx,xx, mrm|vex, x, 58}, /*37*/
{VEX_W_EXT, 0x099718, "(vex_W ext 59)", xx,xx,xx,xx,xx, mrm|vex, x, 59}, /*38*/
{VEX_W_EXT, 0x099818, "(vex_W ext 60)", xx,xx,xx,xx,xx, mrm|vex, x, 60}, /*39*/
{VEX_W_EXT, 0x099918, "(vex_W ext 61)", xx,xx,xx,xx,xx, mrm|vex, x, 61}, /*40*/
{VEX_W_EXT, 0x099a18, "(vex_W ext 62)", xx,xx,xx,xx,xx, mrm|vex, x, 62}, /*41*/
{VEX_W_EXT, 0x099b18, "(vex_W ext 63)", xx,xx,xx,xx,xx, mrm|vex, x, 63}, /*42*/
{OP_vphaddbw, 0x09c118,"vphaddbw", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*43*/
{OP_vphaddbd, 0x09c218,"vphaddbd", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*44*/
{OP_vphaddbq, 0x09c318,"vphaddbq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*45*/
{OP_vphaddwd, 0x09c618,"vphaddwd", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*46*/
{OP_vphaddwq, 0x09c718,"vphaddwq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*47*/
{OP_vphadddq, 0x09cb18,"vphadddq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*48*/
/* AMD decode table erroneously lists this as "vphaddubwd" */
{OP_vphaddubw, 0x09d118,"vphaddubw", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*49*/
{OP_vphaddubd, 0x09d218,"vphaddubd", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*50*/
{OP_vphaddubq, 0x09d318,"vphaddubq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*51*/
{OP_vphadduwd, 0x09d618,"vphadduwd", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*52*/
{OP_vphadduwq, 0x09d718,"vphadduwq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*53*/
{OP_vphaddudq, 0x09db18,"vphaddudq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*54*/
{OP_vphsubbw, 0x09e118,"vphsubbw", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*55*/
{OP_vphsubwd, 0x09e218,"vphsubwd", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*56*/
{OP_vphsubdq, 0x09e318,"vphsubdq", Vdq,xx,Wdq,xx,xx,mrm|vex,x,END_LIST}, /*57*/
{EXTENSION, 0x090118, "(XOP group 1)", xx,xx, xx,xx,xx, mrm|vex, x, 27}, /*58*/
{EXTENSION, 0x090218, "(XOP group 2)", xx,xx, xx,xx,xx, mrm|vex, x, 28}, /*59*/
/* XOP.map_select = 0x0a */
{OP_bextr, 0x0a1018, "bextr", Gy,xx,Ey,Id,xx, mrm|vex, fW6, END_LIST}, /*60*/
/* Later-added instrs, from various tables */
{EXTENSION, 0x091218, "(XOP group 3)", xx,xx, xx,xx,xx, mrm|vex, x, 29}, /*61*/
{EXTENSION, 0x0a1218, "(XOP group 4)", xx,xx, xx,xx,xx, mrm|vex, x, 30}, /*62*/
};
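/* Illustrative sketch only, not used by the decoder itself: the xop_*_index
 * tables above map the full 256-entry opcode byte space onto the much smaller
 * xop_extensions[] array, so an empty slot costs one byte instead of a whole
 * instr_info_t, with index 0 being the shared INVALID entry.  The function
 * and parameter names below are hypothetical.
 */
static inline const instr_info_t *
example_xop_lookup(int map_select, byte opcode_byte)
{
    const byte *index_table;
    if (map_select == 0x08)
        index_table = xop_8_index;
    else if (map_select == 0x09)
        index_table = xop_9_index;
    else /* XOP.map_select = 0x0a */
        index_table = xop_a_index;
    return &xop_extensions[index_table[opcode_byte]];
}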
/****************************************************************************
* String instructions that differ depending on rep/repne prefix
*
* Note that Intel manuals prior to May 2011 claim that for x64 the count
* register for ins and outs is rcx by default, but for all other rep* is ecx.
* The AMD manual, and experimental evidence, contradicts this and has rcx
* as the default count register for all rep*.
* Furthermore, the Intel manual implies that w/o rex.w edi/esi are used
 * rather than rdi/rsi, which again the AMD manual and experimental
* evidence contradict.
*/
const instr_info_t rep_extensions[][4] = {
/* FIXME: ins and outs access "I/O ports", are these memory addresses?
* if so, change Ib to Ob and change dx to i_dx (move to dest for outs)
*/
{ /* rep extension 0 */
{OP_ins, 0x6c0000, "ins", Yb, axDI, dx, axDI, xx, no, fRD, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_ins, 0xf36c0000, "rep ins", Yb, axDI, dx, axDI, axCX, xop_next, fRD, END_LIST},
{OP_CONTD, 0xf36c0000, "rep ins", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 1 */
{OP_ins, 0x6d0000, "ins", Yz, axDI, dx, axDI, xx, no, fRD, tre[0][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_ins, 0xf36d0000, "rep ins", Yz, axDI, dx, axDI, axCX, xop_next, fRD, tre[0][2]},
{OP_CONTD, 0xf36d0000, "rep ins", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 2 */
{OP_outs, 0x6e0000, "outs", axSI, xx, Xb, dx, axSI, no, fRD, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_outs, 0xf36e0000, "rep outs", axSI, axCX, Xb, dx, axSI, xop_next, fRD, END_LIST},
{OP_CONTD, 0xf36e0000, "rep outs", xx, xx, axCX, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 3 */
{OP_outs, 0x6f0000, "outs", axSI, xx, Xz, dx, axSI, no, fRD, tre[2][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_outs, 0xf36f0000, "rep outs", axSI, axCX, Xz, dx, axSI, xop_next, fRD, tre[2][2]},
{OP_CONTD, 0xf36f0000, "rep outs", xx, xx, axCX, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 4 */
{OP_movs, 0xa40000, "movs", Yb, axSI, Xb, axSI, axDI, xop_next, fRD, END_LIST},
{OP_CONTD, 0xa40000, "movs", axDI, xx, xx, xx, xx, no, fRD, END_LIST},
{OP_rep_movs, 0xf3a40000, "rep movs", Yb, axSI, Xb, axSI, axDI, xop_next, fRD, END_LIST},
{OP_CONTD, 0xf3a40000, "rep movs", axDI, axCX, axCX, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 5 */
{OP_movs, 0xa50000, "movs", Yv, axSI, Xv, axSI, axDI, xop_next, fRD, tre[4][0]},
{OP_CONTD, 0xa50000, "movs", axDI, xx, xx, xx, xx, no, fRD, END_LIST},
{OP_rep_movs, 0xf3a50000, "rep movs", Yv, axSI, Xv, axSI, axDI, xop_next, fRD, tre[4][2]},
{OP_CONTD, 0xf3a50000, "rep movs", axDI, axCX, axCX, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 6 */
{OP_stos, 0xaa0000, "stos", Yb, axDI, al, axDI, xx, no, fRD, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_stos, 0xf3aa0000, "rep stos", Yb, axDI, al, axDI, axCX, xop_next, fRD, END_LIST},
{OP_CONTD, 0xf3aa0000, "rep stos", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 7 */
{OP_stos, 0xab0000, "stos", Yv, axDI, eAX, axDI, xx, no, fRD, tre[6][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_stos, 0xf3ab0000, "rep stos", Yv, axDI, eAX, axDI, axCX, xop_next, fRD, tre[6][2]},
{OP_CONTD, 0xf3ab0000, "rep stos", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 8 */
{OP_lods, 0xac0000, "lods", al, axSI, Xb, axSI, xx, no, fRD, END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_lods, 0xf3ac0000, "rep lods", al, axSI, Xb, axSI, axCX, xop_next, fRD, END_LIST},
{OP_CONTD, 0xf3ac0000, "rep lods", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
{ /* rep extension 9 */
{OP_lods, 0xad0000, "lods", eAX, axSI, Xv, axSI, xx, no, fRD, tre[8][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_lods, 0xf3ad0000, "rep lods", eAX, axSI, Xv, axSI, axCX, xop_next, fRD, tre[8][2]},
{OP_CONTD, 0xf3ad0000, "rep lods", axCX, xx, xx, xx, xx, no, fRD, END_LIST},
},
};
const instr_info_t repne_extensions[][6] = {
{ /* repne extension 0 */
{OP_cmps, 0xa60000, "cmps", axSI, axDI, Xb, Yb, axSI, xop_next, (fW6|fRD), END_LIST},
{OP_CONTD, 0xa60000, "cmps", xx, xx, axDI, xx, xx, no, (fW6|fRD), END_LIST},
{OP_rep_cmps, 0xf3a60000, "rep cmps", axSI, axDI, Xb, Yb, axSI, xop_next, (fW6|fRD|fRZ), END_LIST},
{OP_CONTD, 0xf3a60000, "rep cmps", axCX, xx, axDI, axCX, xx, no, (fW6|fRD), END_LIST},
{OP_repne_cmps, 0xf2a60000, "repne cmps", axSI, axDI, Xb, Yb, axSI, xop_next, (fW6|fRD|fRZ), END_LIST},
{OP_CONTD, 0xf2a60000, "repne cmps", axCX, xx, axDI, axCX, xx, no, (fW6|fRD), END_LIST},
},
{ /* repne extension 1 */
{OP_cmps, 0xa70000, "cmps", axSI, axDI, Xv, Yv, axSI, xop_next, (fW6|fRD), tne[0][0]},
{OP_CONTD, 0xa70000, "cmps", xx, xx, axDI, xx, xx, no, (fW6|fRD), END_LIST},
{OP_rep_cmps, 0xf3a70000, "rep cmps", axSI, axDI, Xv, Yv, axSI, xop_next, (fW6|fRD|fRZ), tne[0][2]},
{OP_CONTD, 0xf3a70000, "rep cmps", axCX, xx, axDI, axCX, xx, no, (fW6|fRD), END_LIST},
{OP_repne_cmps, 0xf2a70000, "repne cmps", axSI, axDI, Xv, Yv, axSI, xop_next, (fW6|fRD|fRZ), tne[0][4]},
{OP_CONTD, 0xf2a70000, "repne cmps", axCX, xx, axDI, axCX, xx, no, (fW6|fRD), END_LIST},
},
{ /* repne extension 2 */
{OP_scas, 0xae0000, "scas", axDI, xx, Yb, al, axDI, no, (fW6|fRD), END_LIST},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_scas, 0xf3ae0000, "rep scas", axDI, axCX, Yb, al, axDI, xop_next, (fW6|fRD|fRZ), END_LIST},
{OP_CONTD, 0xf3ae0000, "rep scas", xx, xx, axCX, xx, xx, no, (fW6|fRD), END_LIST},
{OP_repne_scas, 0xf2ae0000, "repne scas", axDI, axCX, Yb, al, axDI, xop_next, (fW6|fRD|fRZ), END_LIST},
{OP_CONTD, 0xf2ae0000, "repne scas", xx, xx, axCX, xx, xx, no, (fW6|fRD), END_LIST},
},
{ /* repne extension 3 */
{OP_scas, 0xaf0000, "scas", axDI, xx, Yv, eAX, axDI, no, (fW6|fRD), tne[2][0]},
{INVALID, 0x00000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_rep_scas, 0xf3af0000, "rep scas", axDI, axCX, Yv, eAX, axDI, xop_next, (fW6|fRD|fRZ), tne[2][2]},
{OP_CONTD, 0xf3af0000, "rep scas", xx, xx, axCX, xx, xx, no, (fW6|fRD), END_LIST},
{OP_repne_scas, 0xf2af0000, "repne scas", axDI, axCX, Yv, eAX, axDI, xop_next, (fW6|fRD|fRZ), tne[2][4]},
{OP_CONTD, 0xf2af0000, "repne scas", xx, xx, axCX, xx, xx, no, (fW6|fRD), END_LIST},
}
};
/****************************************************************************
* Float instructions with ModR/M from 0x00 to 0xbf
* This is from Tables A-7, A-9, A-11, A-13, A-15, A-17, A-19, A-21
* I've added my own symbol '+' to indicate a float, and:
* 'x' to indicate extended real (80 bits)
* 'y' to indicate 14/28 byte value in memory
 * 'z' to indicate 94/108 byte value in memory
*/
/* FIXME: I ignore fp stack changes, should we model that? */
const instr_info_t float_low_modrm[] = {
/* d8 */
{OP_fadd, 0xd80020, "fadd", st0, xx, Fd, st0, xx, mrm, x, tfl[0x20]}, /* 00 */
{OP_fmul, 0xd80021, "fmul", st0, xx, Fd, st0, xx, mrm, x, tfl[0x21]},
{OP_fcom, 0xd80022, "fcom", xx, xx, Fd, st0, xx, mrm, x, tfl[0x22]},
{OP_fcomp, 0xd80023, "fcomp", xx, xx, Fd, st0, xx, mrm, x, tfl[0x23]},
{OP_fsub, 0xd80024, "fsub", st0, xx, Fd, st0, xx, mrm, x, tfl[0x24]},
{OP_fsubr, 0xd80025, "fsubr", st0, xx, Fd, st0, xx, mrm, x, tfl[0x25]},
{OP_fdiv, 0xd80026, "fdiv", st0, xx, Fd, st0, xx, mrm, x, tfl[0x26]},
{OP_fdivr, 0xd80027, "fdivr", st0, xx, Fd, st0, xx, mrm, x, tfl[0x27]},
/* d9 */
{OP_fld, 0xd90020, "fld", st0, xx, Fd, xx, xx, mrm, x, tfl[0x1d]}, /* 08 */
{INVALID, 0xd90021, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fst, 0xd90022, "fst", Fd, xx, st0, xx, xx, mrm, x, tfl[0x2a]},
{OP_fstp, 0xd90023, "fstp", Fd, xx, st0, xx, xx, mrm, x, tfl[0x1f]},
{OP_fldenv, 0xd90024, "fldenv", xx, xx, Fy, xx, xx, mrm, x, END_LIST},
{OP_fldcw, 0xd90025, "fldcw", xx, xx, Fw, xx, xx, mrm, x, END_LIST},
{OP_fnstenv, 0xd90026, "fnstenv", Fy, xx, xx, xx, xx, mrm, x, END_LIST},/*FIXME: w/ preceding fwait instr, this is "fstenv"*/
{OP_fnstcw, 0xd90027, "fnstcw", Fw, xx, xx, xx, xx, mrm, x, END_LIST},/*FIXME: w/ preceding fwait instr, this is "fstcw"*/
/* da */
{OP_fiadd, 0xda0020, "fiadd", st0, xx, Md, st0, xx, mrm, x, tfl[0x30]}, /* 10 */
{OP_fimul, 0xda0021, "fimul", st0, xx, Md, st0, xx, mrm, x, tfl[0x31]},
{OP_ficom, 0xda0022, "ficom", st0, xx, Md, st0, xx, mrm, x, tfl[0x32]},
{OP_ficomp, 0xda0023, "ficomp", st0, xx, Md, st0, xx, mrm, x, tfl[0x33]},
{OP_fisub, 0xda0024, "fisub", st0, xx, Md, st0, xx, mrm, x, tfl[0x34]},
{OP_fisubr, 0xda0025, "fisubr", st0, xx, Md, st0, xx, mrm, x, tfl[0x35]},
{OP_fidiv, 0xda0026, "fidiv", st0, xx, Md, st0, xx, mrm, x, tfl[0x36]},
{OP_fidivr, 0xda0027, "fidivr", st0, xx, Md, st0, xx, mrm, x, tfl[0x37]},
/* db */
{OP_fild, 0xdb0020, "fild", st0, xx, Md, xx, xx, mrm, x, tfl[0x38]}, /* 18 */
{OP_fisttp, 0xdb0021, "fisttp", Md, xx, st0, xx, xx, no, x, tfl[0x39]},
{OP_fist, 0xdb0022, "fist", Md, xx, st0, xx, xx, mrm, x, tfl[0x3a]},
{OP_fistp, 0xdb0023, "fistp", Md, xx, st0, xx, xx, mrm, x, tfl[0x3b]},
{INVALID, 0xdb0024, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fld, 0xdb0025, "fld", st0, xx, Fx, xx, xx, mrm, x, tfl[0x28]},
{INVALID, 0xdb0026, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fstp, 0xdb0027, "fstp", Fx, xx, st0, xx, xx, mrm, x, tfl[0x2b]},
/* dc */
{OP_fadd, 0xdc0020, "fadd", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x00]}, /* 20 */
{OP_fmul, 0xdc0021, "fmul", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x08]},
{OP_fcom, 0xdc0022, "fcom", xx, xx, Fq, st0, xx, mrm, x, tfh[0][0x10]},
{OP_fcomp, 0xdc0023, "fcomp", xx, xx, Fq, st0, xx, mrm, x, tfh[0][0x18]},
{OP_fsub, 0xdc0024, "fsub", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x20]},
{OP_fsubr, 0xdc0025, "fsubr", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x28]},
{OP_fdiv, 0xdc0026, "fdiv", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x30]},
{OP_fdivr, 0xdc0027, "fdivr", st0, xx, Fq, st0, xx, mrm, x, tfh[0][0x38]},
/* dd */
{OP_fld, 0xdd0020, "fld", st0, xx, Fq, xx, xx, mrm, x, tfh[1][0x00]}, /* 28 */
{OP_fisttp, 0xdd0021, "fisttp", Mq, xx, st0, xx, xx, no, x, tfl[0x19]},
{OP_fst, 0xdd0022, "fst", Fq, xx, st0, xx, xx, mrm, x, tfh[5][0x10]},
{OP_fstp, 0xdd0023, "fstp", Fq, xx, st0, xx, xx, mrm, x, tfh[5][0x18]},
{OP_frstor,0xdd0024, "frstor", xx, xx, Fz, xx, xx, mrm, x, END_LIST},
{INVALID, 0xdd0025, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fnsave, 0xdd0026, "fnsave", Fz, xx, xx, xx, xx, mrm, x, END_LIST},/*FIXME:w/ preceding fwait instr, this is "fsave"*/
{OP_fnstsw, 0xdd0027, "fnstsw", Fw, xx, xx, xx, xx, mrm, x, tfh[7][0x20]},/*FIXME:w/ preceding fwait instr, this is "fstsw"*/
/* de */
{OP_fiadd, 0xde0020, "fiadd", st0, xx, Fw, st0, xx, mrm, x, END_LIST}, /* 30 */
{OP_fimul, 0xde0021, "fimul", st0, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_ficom, 0xde0022, "ficom", xx, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_ficomp, 0xde0023, "ficomp", xx, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_fisub, 0xde0024, "fisub", st0, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_fisubr, 0xde0025, "fisubr", st0, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_fidiv, 0xde0026, "fidiv", st0, xx, Fw, st0, xx, mrm, x, END_LIST},
{OP_fidivr, 0xde0027, "fidivr", st0, xx, Fw, st0, xx, mrm, x, END_LIST},
/* df */
{OP_fild, 0xdf0020, "fild", st0, xx, Fw, xx, xx, mrm, x, tfl[0x3d]}, /* 38 */
{OP_fisttp, 0xdf0021, "fisttp", Mw, xx, st0, xx, xx, no, x, END_LIST},
{OP_fist, 0xdf0022, "fist", Fw, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fistp, 0xdf0023, "fistp", Fw, xx, st0, xx, xx, mrm, x, tfl[0x3f]},
{OP_fbld, 0xdf0024, "fbld", st0, xx, Fx, xx, xx, mrm, x, END_LIST},
{OP_fild, 0xdf0025, "fild", st0, xx, Fq, xx, xx, mrm, x, END_LIST},
{OP_fbstp, 0xdf0026, "fbstp", Fx, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fistp, 0xdf0027, "fistp", Fq, xx, st0, xx, xx, mrm, x, END_LIST},
};
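/* Illustrative sketch only, not used by the decoder itself: float_low_modrm
 * above holds 8 entries per opcode byte 0xd8-0xdf, one per value of the
 * ModR/M reg field, covering the memory (ModR/M below 0xc0) forms.  The
 * function and parameter names below are hypothetical.
 */
static inline const instr_info_t *
example_float_low_lookup(byte opcode_byte, byte modrm)
{
    int reg = (modrm >> 3) & 0x7; /* the /0 .. /7 digit */
    return &float_low_modrm[((opcode_byte - 0xd8) << 3) | reg];
}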
/****************************************************************************
* Float instructions with ModR/M above 0xbf
* This is from Tables A-8, A-10, A-12, A-14, A-16, A-18, A-20, A-22
*/
const instr_info_t float_high_modrm[][64] = {
{ /* d8 = [0] */
{OP_fadd, 0xd8c010, "fadd", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x01]}, /* c0 = [0x00] */
{OP_fadd, 0xd8c110, "fadd", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x02]},
{OP_fadd, 0xd8c210, "fadd", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x03]},
{OP_fadd, 0xd8c310, "fadd", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x04]},
{OP_fadd, 0xd8c410, "fadd", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x05]},
{OP_fadd, 0xd8c510, "fadd", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x06]},
{OP_fadd, 0xd8c610, "fadd", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x07]},
{OP_fadd, 0xd8c710, "fadd", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x00]},
{OP_fmul, 0xd8c810, "fmul", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x09]}, /* c8 = [0x08] */
{OP_fmul, 0xd8c910, "fmul", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x0a]},
{OP_fmul, 0xd8ca10, "fmul", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x0b]},
{OP_fmul, 0xd8cb10, "fmul", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x0c]},
{OP_fmul, 0xd8cc10, "fmul", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x0d]},
{OP_fmul, 0xd8cd10, "fmul", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x0e]},
{OP_fmul, 0xd8ce10, "fmul", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x0f]},
{OP_fmul, 0xd8cf10, "fmul", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x08]},
{OP_fcom, 0xd8d010, "fcom", xx, xx, st0, st0, xx, mrm, x, tfh[0][0x11]}, /* d0 = [0x10] */
{OP_fcom, 0xd8d110, "fcom", xx, xx, st0, st1, xx, mrm, x, tfh[0][0x12]},
{OP_fcom, 0xd8d210, "fcom", xx, xx, st0, st2, xx, mrm, x, tfh[0][0x13]},
{OP_fcom, 0xd8d310, "fcom", xx, xx, st0, st3, xx, mrm, x, tfh[0][0x14]},
{OP_fcom, 0xd8d410, "fcom", xx, xx, st0, st4, xx, mrm, x, tfh[0][0x15]},
{OP_fcom, 0xd8d510, "fcom", xx, xx, st0, st5, xx, mrm, x, tfh[0][0x16]},
{OP_fcom, 0xd8d610, "fcom", xx, xx, st0, st6, xx, mrm, x, tfh[0][0x17]},
{OP_fcom, 0xd8d710, "fcom", xx, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fcomp, 0xd8d810, "fcomp", xx, xx, st0, st0, xx, mrm, x, tfh[0][0x19]}, /* d8 = [0x18] */
{OP_fcomp, 0xd8d910, "fcomp", xx, xx, st0, st1, xx, mrm, x, tfh[0][0x1a]},
{OP_fcomp, 0xd8da10, "fcomp", xx, xx, st0, st2, xx, mrm, x, tfh[0][0x1b]},
{OP_fcomp, 0xd8db10, "fcomp", xx, xx, st0, st3, xx, mrm, x, tfh[0][0x1c]},
{OP_fcomp, 0xd8dc10, "fcomp", xx, xx, st0, st4, xx, mrm, x, tfh[0][0x1d]},
{OP_fcomp, 0xd8dd10, "fcomp", xx, xx, st0, st5, xx, mrm, x, tfh[0][0x1e]},
{OP_fcomp, 0xd8de10, "fcomp", xx, xx, st0, st6, xx, mrm, x, tfh[0][0x1f]},
{OP_fcomp, 0xd8df10, "fcomp", xx, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fsub, 0xd8e010, "fsub", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x21]}, /* e0 = [0x20] */
{OP_fsub, 0xd8e110, "fsub", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x22]},
{OP_fsub, 0xd8e210, "fsub", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x23]},
{OP_fsub, 0xd8e310, "fsub", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x24]},
{OP_fsub, 0xd8e410, "fsub", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x25]},
{OP_fsub, 0xd8e510, "fsub", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x26]},
{OP_fsub, 0xd8e610, "fsub", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x27]},
{OP_fsub, 0xd8e710, "fsub", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x28]},
{OP_fsubr, 0xd8e810, "fsubr", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x29]}, /* e8 = [0x28] */
{OP_fsubr, 0xd8e910, "fsubr", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x2a]},
{OP_fsubr, 0xd8ea10, "fsubr", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x2b]},
{OP_fsubr, 0xd8eb10, "fsubr", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x2c]},
{OP_fsubr, 0xd8ec10, "fsubr", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x2d]},
{OP_fsubr, 0xd8ed10, "fsubr", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x2e]},
{OP_fsubr, 0xd8ee10, "fsubr", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x2f]},
{OP_fsubr, 0xd8ef10, "fsubr", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x20]},
{OP_fdiv, 0xd8f010, "fdiv", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x31]}, /* f0 = [0x30] */
{OP_fdiv, 0xd8f110, "fdiv", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x32]},
{OP_fdiv, 0xd8f210, "fdiv", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x33]},
{OP_fdiv, 0xd8f310, "fdiv", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x34]},
{OP_fdiv, 0xd8f410, "fdiv", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x35]},
{OP_fdiv, 0xd8f510, "fdiv", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x36]},
{OP_fdiv, 0xd8f610, "fdiv", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x37]},
{OP_fdiv, 0xd8f710, "fdiv", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x38]},
{OP_fdivr, 0xd8f810, "fdivr", st0, xx, st0, st0, xx, mrm, x, tfh[0][0x39]}, /* f8 = [0x38] */
{OP_fdivr, 0xd8f910, "fdivr", st0, xx, st1, st0, xx, mrm, x, tfh[0][0x3a]},
{OP_fdivr, 0xd8fa10, "fdivr", st0, xx, st2, st0, xx, mrm, x, tfh[0][0x3b]},
{OP_fdivr, 0xd8fb10, "fdivr", st0, xx, st3, st0, xx, mrm, x, tfh[0][0x3c]},
{OP_fdivr, 0xd8fc10, "fdivr", st0, xx, st4, st0, xx, mrm, x, tfh[0][0x3d]},
{OP_fdivr, 0xd8fd10, "fdivr", st0, xx, st5, st0, xx, mrm, x, tfh[0][0x3e]},
{OP_fdivr, 0xd8fe10, "fdivr", st0, xx, st6, st0, xx, mrm, x, tfh[0][0x3f]},
{OP_fdivr, 0xd8ff10, "fdivr", st0, xx, st7, st0, xx, mrm, x, tfh[4][0x30]},
},
{ /* d9 = [1] */
{OP_fld, 0xd9c010, "fld", st0, xx, st0, xx, xx, mrm, x, tfh[1][0x01]}, /* c0 = [0x00] */
{OP_fld, 0xd9c110, "fld", st0, xx, st1, xx, xx, mrm, x, tfh[1][0x02]},
{OP_fld, 0xd9c210, "fld", st0, xx, st2, xx, xx, mrm, x, tfh[1][0x03]},
{OP_fld, 0xd9c310, "fld", st0, xx, st3, xx, xx, mrm, x, tfh[1][0x04]},
{OP_fld, 0xd9c410, "fld", st0, xx, st4, xx, xx, mrm, x, tfh[1][0x05]},
{OP_fld, 0xd9c510, "fld", st0, xx, st5, xx, xx, mrm, x, tfh[1][0x06]},
{OP_fld, 0xd9c610, "fld", st0, xx, st6, xx, xx, mrm, x, tfh[1][0x07]},
{OP_fld, 0xd9c710, "fld", st0, xx, st7, xx, xx, mrm, x, END_LIST},
{OP_fxch, 0xd9c810, "fxch", st0, st0, st0, st0, xx, mrm, x, tfh[1][0x09]}, /* c8 = [0x08] */
{OP_fxch, 0xd9c910, "fxch", st0, st1, st0, st1, xx, mrm, x, tfh[1][0x0a]},
{OP_fxch, 0xd9ca10, "fxch", st0, st2, st0, st2, xx, mrm, x, tfh[1][0x0b]},
{OP_fxch, 0xd9cb10, "fxch", st0, st3, st0, st3, xx, mrm, x, tfh[1][0x0c]},
{OP_fxch, 0xd9cc10, "fxch", st0, st4, st0, st4, xx, mrm, x, tfh[1][0x0d]},
{OP_fxch, 0xd9cd10, "fxch", st0, st5, st0, st5, xx, mrm, x, tfh[1][0x0e]},
{OP_fxch, 0xd9ce10, "fxch", st0, st6, st0, st6, xx, mrm, x, tfh[1][0x0f]},
{OP_fxch, 0xd9cf10, "fxch", st0, st7, st0, st7, xx, mrm, x, END_LIST},
{OP_fnop, 0xd9d010, "fnop", xx, xx, xx, xx, xx, mrm, x, END_LIST}, /* d0 = [0x10] */
{INVALID, 0xd9d110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9d710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* Undocumented. On sandpile.org as "fstp1". We assume an alias for fstp
* and do not include in the encode chain.
*/
{OP_fstp, 0xd9d810, "fstp", st0, xx, st0, xx, xx, mrm, x, END_LIST}, /* d8 = [0x18] */
{OP_fstp, 0xd9d910, "fstp", st1, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9da10, "fstp", st2, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9db10, "fstp", st3, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9dc10, "fstp", st4, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9dd10, "fstp", st5, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9de10, "fstp", st6, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xd9df10, "fstp", st7, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fchs, 0xd9e010, "fchs", st0, xx, st0, xx, xx, mrm, x, END_LIST}, /* e0 = [0x20] */
{OP_fabs, 0xd9e110, "fabs", st0, xx, st0, xx, xx, mrm, x, END_LIST},
{INVALID, 0xd9e210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9e310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_ftst, 0xd9e410, "ftst", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fxam, 0xd9e510, "fxam", xx, xx, st0, xx, xx, mrm, x, END_LIST},
{INVALID, 0xd9e610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xd9e710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fld1, 0xd9e810, "fld1", st0, xx, cF, xx, xx, mrm, x, END_LIST}, /* e8 = [0x28] */
{OP_fldl2t, 0xd9e910, "fldl2t", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fldl2e, 0xd9ea10, "fldl2e", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fldpi, 0xd9eb10, "fldpi", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fldlg2, 0xd9ec10, "fldlg2", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fldln2, 0xd9ed10, "fldln2", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{OP_fldz, 0xd9ee10, "fldz", st0, xx, cF, xx, xx, mrm, x, END_LIST},
{INVALID, 0xd9ef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_f2xm1, 0xd9f010, "f2xm1", st0, xx, st0, xx, xx, mrm, x, END_LIST}, /* f0 = [0x30] */
{OP_fyl2x, 0xd9f110, "fyl2x", st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fptan, 0xd9f210, "fptan", st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fpatan, 0xd9f310, "fpatan", st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fxtract,0xd9f410, "fxtract",st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fprem1, 0xd9f510, "fprem1", st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fdecstp,0xd9f610, "fdecstp", xx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_fincstp,0xd9f710, "fincstp", xx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_fprem, 0xd9f810, "fprem", st0, st1, st0, st1, xx, mrm, x, END_LIST}, /* f8 = [0x38] */
{OP_fyl2xp1,0xd9f910, "fyl2xp1",st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fsqrt, 0xd9fa10, "fsqrt", st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fsincos,0xd9fb10, "fsincos",st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_frndint,0xd9fc10, "frndint",st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fscale, 0xd9fd10, "fscale", st0, xx, st1, st0, xx, mrm, x, END_LIST},
{OP_fsin, 0xd9fe10, "fsin", st0, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fcos, 0xd9ff10, "fcos", st0, xx, st0, xx, xx, mrm, x, END_LIST},
},
{ /* da = [2] */
{OP_fcmovb, 0xdac010, "fcmovb", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x01]}, /* c0 = [0x00] */
{OP_fcmovb, 0xdac110, "fcmovb", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x02]},
{OP_fcmovb, 0xdac210, "fcmovb", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x03]},
{OP_fcmovb, 0xdac310, "fcmovb", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x04]},
{OP_fcmovb, 0xdac410, "fcmovb", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x05]},
{OP_fcmovb, 0xdac510, "fcmovb", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x06]},
{OP_fcmovb, 0xdac610, "fcmovb", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x07]},
{OP_fcmovb, 0xdac710, "fcmovb", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{OP_fcmove, 0xdac810, "fcmove", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x09]}, /* c8 = [0x08] */
{OP_fcmove, 0xdac910, "fcmove", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0a]},
{OP_fcmove, 0xdaca10, "fcmove", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0b]},
{OP_fcmove, 0xdacb10, "fcmove", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0c]},
{OP_fcmove, 0xdacc10, "fcmove", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0d]},
{OP_fcmove, 0xdacd10, "fcmove", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0e]},
{OP_fcmove, 0xdace10, "fcmove", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x0f]},
{OP_fcmove, 0xdacf10, "fcmove", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{OP_fcmovbe, 0xdad010, "fcmovbe", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x11]}, /* d0 = [0x10] */
{OP_fcmovbe, 0xdad110, "fcmovbe", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x12]},
{OP_fcmovbe, 0xdad210, "fcmovbe", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x13]},
{OP_fcmovbe, 0xdad310, "fcmovbe", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x14]},
{OP_fcmovbe, 0xdad410, "fcmovbe", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x15]},
{OP_fcmovbe, 0xdad510, "fcmovbe", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x16]},
{OP_fcmovbe, 0xdad610, "fcmovbe", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x17]},
{OP_fcmovbe, 0xdad710, "fcmovbe", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{OP_fcmovu, 0xdad810, "fcmovu", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x19]}, /* d8 = [0x18] */
{OP_fcmovu, 0xdad910, "fcmovu", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1a]},
{OP_fcmovu, 0xdada10, "fcmovu", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1b]},
{OP_fcmovu, 0xdadb10, "fcmovu", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1c]},
{OP_fcmovu, 0xdadc10, "fcmovu", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1d]},
{OP_fcmovu, 0xdadd10, "fcmovu", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1e]},
{OP_fcmovu, 0xdade10, "fcmovu", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[2][0x1f]},
{OP_fcmovu, 0xdadf10, "fcmovu", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{INVALID, 0xdae010, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* e0 = [0x20] */
{INVALID, 0xdae110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdae810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* e8 = [0x28] */
{OP_fucompp, 0xdae910, "fucompp", xx, xx, st0, st1, xx, mrm, x, END_LIST},
{INVALID, 0xdaea10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaeb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaec10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaed10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaee10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaef10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf010, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f0 = [0x30] */
{INVALID, 0xdaf110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaf810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f8 = [0x38] */
{INVALID, 0xdaf910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdafa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdafb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdafc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdafd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdafe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdaff10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* db = [3] */
{OP_fcmovnb, 0xdbc010, "fcmovnb", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x01]}, /* c0 = [0x00] */
{OP_fcmovnb, 0xdbc110, "fcmovnb", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x02]},
{OP_fcmovnb, 0xdbc210, "fcmovnb", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x03]},
{OP_fcmovnb, 0xdbc310, "fcmovnb", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x04]},
{OP_fcmovnb, 0xdbc410, "fcmovnb", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x05]},
{OP_fcmovnb, 0xdbc510, "fcmovnb", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x06]},
{OP_fcmovnb, 0xdbc610, "fcmovnb", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x07]},
{OP_fcmovnb, 0xdbc710, "fcmovnb", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{OP_fcmovne, 0xdbc810, "fcmovne", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x09]}, /* c8 = [0x08] */
{OP_fcmovne, 0xdbc910, "fcmovne", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0a]},
{OP_fcmovne, 0xdbca10, "fcmovne", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0b]},
{OP_fcmovne, 0xdbcb10, "fcmovne", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0c]},
{OP_fcmovne, 0xdbcc10, "fcmovne", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0d]},
{OP_fcmovne, 0xdbcd10, "fcmovne", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0e]},
{OP_fcmovne, 0xdbce10, "fcmovne", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x0f]},
{OP_fcmovne, 0xdbcf10, "fcmovne", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
    {OP_fcmovnbe, 0xdbd010, "fcmovnbe", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x11]}, /* d0 = [0x10] */
{OP_fcmovnbe, 0xdbd110, "fcmovnbe", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x12]},
{OP_fcmovnbe, 0xdbd210, "fcmovnbe", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x13]},
{OP_fcmovnbe, 0xdbd310, "fcmovnbe", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x14]},
{OP_fcmovnbe, 0xdbd410, "fcmovnbe", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x15]},
{OP_fcmovnbe, 0xdbd510, "fcmovnbe", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x16]},
{OP_fcmovnbe, 0xdbd610, "fcmovnbe", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x17]},
{OP_fcmovnbe, 0xdbd710, "fcmovnbe", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{OP_fcmovnu, 0xdbd810, "fcmovnu", st0, xx, st0, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x19]}, /* d8 = [0x18] */
{OP_fcmovnu, 0xdbd910, "fcmovnu", st0, xx, st1, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1a]},
{OP_fcmovnu, 0xdbda10, "fcmovnu", st0, xx, st2, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1b]},
{OP_fcmovnu, 0xdbdb10, "fcmovnu", st0, xx, st3, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1c]},
{OP_fcmovnu, 0xdbdc10, "fcmovnu", st0, xx, st4, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1d]},
{OP_fcmovnu, 0xdbdd10, "fcmovnu", st0, xx, st5, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1e]},
{OP_fcmovnu, 0xdbde10, "fcmovnu", st0, xx, st6, xx, xx, mrm|predcc, (fRC|fRP|fRZ), tfh[3][0x1f]},
{OP_fcmovnu, 0xdbdf10, "fcmovnu", st0, xx, st7, xx, xx, mrm|predcc, (fRC|fRP|fRZ), END_LIST},
{INVALID, 0xdbe010, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* e0 = [0x20] */
{INVALID, 0xdbe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fnclex, 0xdbe210, "fnclex", xx, xx, xx, xx, xx, mrm, x, END_LIST},/*FIXME: w/ preceding fwait instr, called "fclex"*/
{OP_fninit, 0xdbe310, "fninit", xx, xx, xx, xx, xx, mrm, x, END_LIST},/*FIXME: w/ preceding fwait instr, called "finit"*/
{INVALID, 0xdbe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbe610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fucomi, 0xdbe810, "fucomi", xx, xx, st0, st0, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x29]}, /* e8 = [0x28] */
{OP_fucomi, 0xdbe910, "fucomi", xx, xx, st0, st1, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2a]},
{OP_fucomi, 0xdbea10, "fucomi", xx, xx, st0, st2, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2b]},
{OP_fucomi, 0xdbeb10, "fucomi", xx, xx, st0, st3, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2c]},
{OP_fucomi, 0xdbec10, "fucomi", xx, xx, st0, st4, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2d]},
{OP_fucomi, 0xdbed10, "fucomi", xx, xx, st0, st5, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2e]},
{OP_fucomi, 0xdbee10, "fucomi", xx, xx, st0, st6, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x2f]},
{OP_fucomi, 0xdbef10, "fucomi", xx, xx, st0, st7, xx, mrm, (fWC|fWP|fWZ), END_LIST},
{OP_fcomi, 0xdbf010, "fcomi", xx, xx, st0, st0, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x31]}, /* f0 = [0x30] */
{OP_fcomi, 0xdbf110, "fcomi", xx, xx, st0, st1, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x32]},
{OP_fcomi, 0xdbf210, "fcomi", xx, xx, st0, st2, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x33]},
{OP_fcomi, 0xdbf310, "fcomi", xx, xx, st0, st3, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x34]},
{OP_fcomi, 0xdbf410, "fcomi", xx, xx, st0, st4, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x35]},
{OP_fcomi, 0xdbf510, "fcomi", xx, xx, st0, st5, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x36]},
{OP_fcomi, 0xdbf610, "fcomi", xx, xx, st0, st6, xx, mrm, (fWC|fWP|fWZ), tfh[3][0x37]},
{OP_fcomi, 0xdbf710, "fcomi", xx, xx, st0, st7, xx, mrm, (fWC|fWP|fWZ), END_LIST},
{INVALID, 0xdbf810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f8 = [0x38] */
{INVALID, 0xdbf910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbfa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbfb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbfc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbfd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbfe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdbff10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* dc = [4] */
{OP_fadd, 0xdcc010, "fadd", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x01]}, /* c0 = [0x00] */
{OP_fadd, 0xdcc110, "fadd", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x02]},
{OP_fadd, 0xdcc210, "fadd", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x03]},
{OP_fadd, 0xdcc310, "fadd", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x04]},
{OP_fadd, 0xdcc410, "fadd", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x05]},
{OP_fadd, 0xdcc510, "fadd", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x06]},
{OP_fadd, 0xdcc610, "fadd", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x07]},
{OP_fadd, 0xdcc710, "fadd", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fmul, 0xdcc810, "fmul", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x09]}, /* c8 = [0x08] */
{OP_fmul, 0xdcc910, "fmul", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x0a]},
{OP_fmul, 0xdcca10, "fmul", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x0b]},
{OP_fmul, 0xdccb10, "fmul", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x0c]},
{OP_fmul, 0xdccc10, "fmul", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x0d]},
{OP_fmul, 0xdccd10, "fmul", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x0e]},
{OP_fmul, 0xdcce10, "fmul", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x0f]},
{OP_fmul, 0xdccf10, "fmul", st7, xx, st0, st7, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fcom2". We assume an alias for fcom
* and do not include in the encode chain.
*/
{OP_fcom, 0xdcd010, "fcom", xx, xx, st0, st0, xx, mrm, x, END_LIST}, /* d0 = [0x10] */
{OP_fcom, 0xdcd110, "fcom", xx, xx, st0, st1, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd210, "fcom", xx, xx, st0, st2, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd310, "fcom", xx, xx, st0, st3, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd410, "fcom", xx, xx, st0, st4, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd510, "fcom", xx, xx, st0, st5, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd610, "fcom", xx, xx, st0, st6, xx, mrm, x, END_LIST},
{OP_fcom, 0xdcd710, "fcom", xx, xx, st0, st7, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fcomp3". We assume an alias for fcomp
* and do not include in the encode chain.
*/
{OP_fcomp, 0xdcd810, "fcomp", xx, xx, st0, st0, xx, mrm, x, END_LIST}, /* d8 = [0x18] */
{OP_fcomp, 0xdcd910, "fcomp", xx, xx, st0, st1, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcda10, "fcomp", xx, xx, st0, st2, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcdb10, "fcomp", xx, xx, st0, st3, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcdc10, "fcomp", xx, xx, st0, st4, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcdd10, "fcomp", xx, xx, st0, st5, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcde10, "fcomp", xx, xx, st0, st6, xx, mrm, x, END_LIST},
{OP_fcomp, 0xdcdf10, "fcomp", xx, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fsubr, 0xdce010, "fsubr", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x21]}, /* e0 = [0x20] */
{OP_fsubr, 0xdce110, "fsubr", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x22]},
{OP_fsubr, 0xdce210, "fsubr", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x23]},
{OP_fsubr, 0xdce310, "fsubr", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x24]},
{OP_fsubr, 0xdce410, "fsubr", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x25]},
{OP_fsubr, 0xdce510, "fsubr", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x26]},
{OP_fsubr, 0xdce610, "fsubr", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x27]},
{OP_fsubr, 0xdce710, "fsubr", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fsub, 0xdce810, "fsub", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x29]}, /* e8 = [0x28] */
{OP_fsub, 0xdce910, "fsub", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x2a]},
{OP_fsub, 0xdcea10, "fsub", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x2b]},
{OP_fsub, 0xdceb10, "fsub", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x2c]},
{OP_fsub, 0xdcec10, "fsub", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x2d]},
{OP_fsub, 0xdced10, "fsub", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x2e]},
{OP_fsub, 0xdcee10, "fsub", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x2f]},
{OP_fsub, 0xdcef10, "fsub", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fdivr, 0xdcf010, "fdivr", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x31]}, /* f0 = [0x30] */
{OP_fdivr, 0xdcf110, "fdivr", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x32]},
{OP_fdivr, 0xdcf210, "fdivr", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x33]},
{OP_fdivr, 0xdcf310, "fdivr", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x34]},
{OP_fdivr, 0xdcf410, "fdivr", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x35]},
{OP_fdivr, 0xdcf510, "fdivr", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x36]},
{OP_fdivr, 0xdcf610, "fdivr", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x37]},
{OP_fdivr, 0xdcf710, "fdivr", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fdiv, 0xdcf810, "fdiv", st0, xx, st0, st0, xx, mrm, x, tfh[4][0x39]}, /* f8 = [0x38] */
{OP_fdiv, 0xdcf910, "fdiv", st1, xx, st0, st1, xx, mrm, x, tfh[4][0x3a]},
{OP_fdiv, 0xdcfa10, "fdiv", st2, xx, st0, st2, xx, mrm, x, tfh[4][0x3b]},
{OP_fdiv, 0xdcfb10, "fdiv", st3, xx, st0, st3, xx, mrm, x, tfh[4][0x3c]},
{OP_fdiv, 0xdcfc10, "fdiv", st4, xx, st0, st4, xx, mrm, x, tfh[4][0x3d]},
{OP_fdiv, 0xdcfd10, "fdiv", st5, xx, st0, st5, xx, mrm, x, tfh[4][0x3e]},
{OP_fdiv, 0xdcfe10, "fdiv", st6, xx, st0, st6, xx, mrm, x, tfh[4][0x3f]},
{OP_fdiv, 0xdcff10, "fdiv", st7, xx, st0, st7, xx, mrm, x, END_LIST},
},
{ /* dd = [5] */
{OP_ffree, 0xddc010, "ffree", st0, xx, xx, xx, xx, mrm, x, tfh[5][0x01]}, /* c0 = [0x00] */
{OP_ffree, 0xddc110, "ffree", st1, xx, xx, xx, xx, mrm, x, tfh[5][0x02]},
{OP_ffree, 0xddc210, "ffree", st2, xx, xx, xx, xx, mrm, x, tfh[5][0x03]},
{OP_ffree, 0xddc310, "ffree", st3, xx, xx, xx, xx, mrm, x, tfh[5][0x04]},
{OP_ffree, 0xddc410, "ffree", st4, xx, xx, xx, xx, mrm, x, tfh[5][0x05]},
{OP_ffree, 0xddc510, "ffree", st5, xx, xx, xx, xx, mrm, x, tfh[5][0x06]},
{OP_ffree, 0xddc610, "ffree", st6, xx, xx, xx, xx, mrm, x, tfh[5][0x07]},
{OP_ffree, 0xddc710, "ffree", st7, xx, xx, xx, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fxch4". We assume an alias for fxch
* and do not include in the encode chain.
*/
{OP_fxch, 0xddc810, "fxch", st0, st0, st0, st0, xx, mrm, x, END_LIST}, /* c8 = [0x08] */
{OP_fxch, 0xddc910, "fxch", st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fxch, 0xddca10, "fxch", st0, st2, st0, st2, xx, mrm, x, END_LIST},
{OP_fxch, 0xddcb10, "fxch", st0, st3, st0, st3, xx, mrm, x, END_LIST},
{OP_fxch, 0xddcc10, "fxch", st0, st4, st0, st4, xx, mrm, x, END_LIST},
{OP_fxch, 0xddcd10, "fxch", st0, st5, st0, st5, xx, mrm, x, END_LIST},
{OP_fxch, 0xddce10, "fxch", st0, st6, st0, st6, xx, mrm, x, END_LIST},
{OP_fxch, 0xddcf10, "fxch", st0, st7, st0, st7, xx, mrm, x, END_LIST},
{OP_fst, 0xddd010, "fst", st0, xx, st0, xx, xx, mrm, x, tfh[5][0x11]}, /* d0 = [0x10] */
{OP_fst, 0xddd110, "fst", st1, xx, st0, xx, xx, mrm, x, tfh[5][0x12]},
{OP_fst, 0xddd210, "fst", st2, xx, st0, xx, xx, mrm, x, tfh[5][0x13]},
{OP_fst, 0xddd310, "fst", st3, xx, st0, xx, xx, mrm, x, tfh[5][0x14]},
{OP_fst, 0xddd410, "fst", st4, xx, st0, xx, xx, mrm, x, tfh[5][0x15]},
{OP_fst, 0xddd510, "fst", st5, xx, st0, xx, xx, mrm, x, tfh[5][0x16]},
{OP_fst, 0xddd610, "fst", st6, xx, st0, xx, xx, mrm, x, tfh[5][0x17]},
{OP_fst, 0xddd710, "fst", st7, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xddd810, "fstp", st0, xx, st0, xx, xx, mrm, x, tfh[5][0x19]}, /* d8 = [0x18] */
{OP_fstp, 0xddd910, "fstp", st1, xx, st0, xx, xx, mrm, x, tfh[5][0x1a]},
{OP_fstp, 0xddda10, "fstp", st2, xx, st0, xx, xx, mrm, x, tfh[5][0x1b]},
{OP_fstp, 0xdddb10, "fstp", st3, xx, st0, xx, xx, mrm, x, tfh[5][0x1c]},
{OP_fstp, 0xdddc10, "fstp", st4, xx, st0, xx, xx, mrm, x, tfh[5][0x1d]},
{OP_fstp, 0xdddd10, "fstp", st5, xx, st0, xx, xx, mrm, x, tfh[5][0x1e]},
{OP_fstp, 0xddde10, "fstp", st6, xx, st0, xx, xx, mrm, x, tfh[5][0x1f]},
{OP_fstp, 0xdddf10, "fstp", st7, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fucom, 0xdde010, "fucom", xx, xx, st0, st0, xx, mrm, x, tfh[5][0x21]}, /* e0 = [0x20] */
{OP_fucom, 0xdde110, "fucom", xx, xx, st1, st0, xx, mrm, x, tfh[5][0x22]},
{OP_fucom, 0xdde210, "fucom", xx, xx, st2, st0, xx, mrm, x, tfh[5][0x23]},
{OP_fucom, 0xdde310, "fucom", xx, xx, st3, st0, xx, mrm, x, tfh[5][0x24]},
{OP_fucom, 0xdde410, "fucom", xx, xx, st4, st0, xx, mrm, x, tfh[5][0x25]},
{OP_fucom, 0xdde510, "fucom", xx, xx, st5, st0, xx, mrm, x, tfh[5][0x26]},
{OP_fucom, 0xdde610, "fucom", xx, xx, st6, st0, xx, mrm, x, tfh[5][0x27]},
{OP_fucom, 0xdde710, "fucom", xx, xx, st7, st0, xx, mrm, x, END_LIST},
{OP_fucomp, 0xdde810, "fucomp", xx, xx, st0, st0, xx, mrm, x, tfh[5][0x29]}, /* e8 = [0x28] */
{OP_fucomp, 0xdde910, "fucomp", xx, xx, st1, st0, xx, mrm, x, tfh[5][0x2a]},
{OP_fucomp, 0xddea10, "fucomp", xx, xx, st2, st0, xx, mrm, x, tfh[5][0x2b]},
{OP_fucomp, 0xddeb10, "fucomp", xx, xx, st3, st0, xx, mrm, x, tfh[5][0x2c]},
{OP_fucomp, 0xddec10, "fucomp", xx, xx, st4, st0, xx, mrm, x, tfh[5][0x2d]},
{OP_fucomp, 0xdded10, "fucomp", xx, xx, st5, st0, xx, mrm, x, tfh[5][0x2e]},
{OP_fucomp, 0xddee10, "fucomp", xx, xx, st6, st0, xx, mrm, x, tfh[5][0x2f]},
{OP_fucomp, 0xddef10, "fucomp", xx, xx, st7, st0, xx, mrm, x, END_LIST},
{INVALID, 0xddf010, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f0 = [0x30] */
{INVALID, 0xddf110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddf810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f8 = [0x38] */
{INVALID, 0xddf910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddfa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddfb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddfc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddfd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddfe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xddff10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
{ /* de = [6]*/
{OP_faddp, 0xdec010, "faddp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x01]}, /* c0 = [0x00] */
{OP_faddp, 0xdec110, "faddp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x02]},
{OP_faddp, 0xdec210, "faddp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x03]},
{OP_faddp, 0xdec310, "faddp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x04]},
{OP_faddp, 0xdec410, "faddp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x05]},
{OP_faddp, 0xdec510, "faddp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x06]},
{OP_faddp, 0xdec610, "faddp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x07]},
{OP_faddp, 0xdec710, "faddp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fmulp, 0xdec810, "fmulp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x09]}, /* c8 = [0x08] */
{OP_fmulp, 0xdec910, "fmulp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x0a]},
{OP_fmulp, 0xdeca10, "fmulp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x0b]},
{OP_fmulp, 0xdecb10, "fmulp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x0c]},
{OP_fmulp, 0xdecc10, "fmulp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x0d]},
{OP_fmulp, 0xdecd10, "fmulp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x0e]},
{OP_fmulp, 0xdece10, "fmulp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x0f]},
{OP_fmulp, 0xdecf10, "fmulp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fcomp5". We assume an alias for fcomp
* and do not include in the encode chain.
*/
{OP_fcomp, 0xded010, "fcomp", xx, xx, st0, st0, xx, mrm, x, END_LIST}, /* d0 = [0x10] */
{OP_fcomp, 0xded110, "fcomp", xx, xx, st0, st1, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded210, "fcomp", xx, xx, st0, st2, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded310, "fcomp", xx, xx, st0, st3, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded410, "fcomp", xx, xx, st0, st4, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded510, "fcomp", xx, xx, st0, st5, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded610, "fcomp", xx, xx, st0, st6, xx, mrm, x, END_LIST},
{OP_fcomp, 0xded710, "fcomp", xx, xx, st0, st7, xx, mrm, x, END_LIST},
{INVALID, 0xded810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* d8 = [0x18] */
{OP_fcompp, 0xded910, "fcompp", xx, xx, st0, st1, xx, mrm, x, END_LIST},
{INVALID, 0xdeda10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdedb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdedc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdedd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdede10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdedf10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fsubrp, 0xdee010, "fsubrp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x21]}, /* e0 = [0x20] */
{OP_fsubrp, 0xdee110, "fsubrp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x22]},
{OP_fsubrp, 0xdee210, "fsubrp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x23]},
{OP_fsubrp, 0xdee310, "fsubrp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x24]},
{OP_fsubrp, 0xdee410, "fsubrp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x25]},
{OP_fsubrp, 0xdee510, "fsubrp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x26]},
{OP_fsubrp, 0xdee610, "fsubrp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x27]},
{OP_fsubrp, 0xdee710, "fsubrp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fsubp, 0xdee810, "fsubp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x29]}, /* e8 = [0x28] */
{OP_fsubp, 0xdee910, "fsubp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x2a]},
{OP_fsubp, 0xdeea10, "fsubp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x2b]},
{OP_fsubp, 0xdeeb10, "fsubp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x2c]},
{OP_fsubp, 0xdeec10, "fsubp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x2d]},
{OP_fsubp, 0xdeed10, "fsubp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x2e]},
{OP_fsubp, 0xdeee10, "fsubp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x2f]},
{OP_fsubp, 0xdeef10, "fsubp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fdivrp, 0xdef010, "fdivrp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x31]}, /* f0 = [0x30] */
{OP_fdivrp, 0xdef110, "fdivrp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x32]},
{OP_fdivrp, 0xdef210, "fdivrp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x33]},
{OP_fdivrp, 0xdef310, "fdivrp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x34]},
{OP_fdivrp, 0xdef410, "fdivrp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x35]},
{OP_fdivrp, 0xdef510, "fdivrp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x36]},
{OP_fdivrp, 0xdef610, "fdivrp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x37]},
{OP_fdivrp, 0xdef710, "fdivrp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
{OP_fdivp, 0xdef810, "fdivp", st0, xx, st0, st0, xx, mrm, x, tfh[6][0x39]}, /* f8 = [0x38] */
{OP_fdivp, 0xdef910, "fdivp", st1, xx, st0, st1, xx, mrm, x, tfh[6][0x3a]},
{OP_fdivp, 0xdefa10, "fdivp", st2, xx, st0, st2, xx, mrm, x, tfh[6][0x3b]},
{OP_fdivp, 0xdefb10, "fdivp", st3, xx, st0, st3, xx, mrm, x, tfh[6][0x3c]},
{OP_fdivp, 0xdefc10, "fdivp", st4, xx, st0, st4, xx, mrm, x, tfh[6][0x3d]},
{OP_fdivp, 0xdefd10, "fdivp", st5, xx, st0, st5, xx, mrm, x, tfh[6][0x3e]},
{OP_fdivp, 0xdefe10, "fdivp", st6, xx, st0, st6, xx, mrm, x, tfh[6][0x3f]},
{OP_fdivp, 0xdeff10, "fdivp", st7, xx, st0, st7, xx, mrm, x, END_LIST},
},
{ /* df = [7] */
/* Undocumented by Intel, but is on p152 of "AMD Athlon
* Processor x86 Code Optimization Guide."
*/
{OP_ffreep, 0xdfc010, "ffreep", st0, xx, xx, xx, xx, mrm, x, tfh[7][0x01]}, /* c0 = [0x00] */
{OP_ffreep, 0xdfc110, "ffreep", st1, xx, xx, xx, xx, mrm, x, tfh[7][0x02]},
{OP_ffreep, 0xdfc210, "ffreep", st2, xx, xx, xx, xx, mrm, x, tfh[7][0x03]},
{OP_ffreep, 0xdfc310, "ffreep", st3, xx, xx, xx, xx, mrm, x, tfh[7][0x04]},
{OP_ffreep, 0xdfc410, "ffreep", st4, xx, xx, xx, xx, mrm, x, tfh[7][0x05]},
{OP_ffreep, 0xdfc510, "ffreep", st5, xx, xx, xx, xx, mrm, x, tfh[7][0x06]},
{OP_ffreep, 0xdfc610, "ffreep", st6, xx, xx, xx, xx, mrm, x, tfh[7][0x07]},
{OP_ffreep, 0xdfc710, "ffreep", st7, xx, xx, xx, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fxch7". We assume an alias for fxch
* and do not include in the encode chain.
*/
{OP_fxch, 0xdfc810, "fxch", st0, st0, st0, st0, xx, mrm, x, END_LIST}, /* c8 = [0x08] */
{OP_fxch, 0xdfc910, "fxch", st0, st1, st0, st1, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfca10, "fxch", st0, st2, st0, st2, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfcb10, "fxch", st0, st3, st0, st3, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfcc10, "fxch", st0, st4, st0, st4, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfcd10, "fxch", st0, st5, st0, st5, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfce10, "fxch", st0, st6, st0, st6, xx, mrm, x, END_LIST},
{OP_fxch, 0xdfcf10, "fxch", st0, st7, st0, st7, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fstp8". We assume an alias for fstp
* and do not include in the encode chain.
*/
{OP_fstp, 0xdfd010, "fstp", st0, xx, st0, xx, xx, mrm, x, END_LIST}, /* d0 = [0x10] */
{OP_fstp, 0xdfd110, "fstp", st1, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd210, "fstp", st2, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd310, "fstp", st3, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd410, "fstp", st4, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd510, "fstp", st5, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd610, "fstp", st6, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfd710, "fstp", st7, xx, st0, xx, xx, mrm, x, END_LIST},
/* Undocumented. On sandpile.org as "fstp9". We assume an alias for fstp
* and do not include in the encode chain.
*/
{OP_fstp, 0xdfd810, "fstp", st0, xx, st0, xx, xx, mrm, x, END_LIST}, /* d8 = [0x18] */
{OP_fstp, 0xdfd910, "fstp", st1, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfda10, "fstp", st2, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfdb10, "fstp", st3, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfdc10, "fstp", st4, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfdd10, "fstp", st5, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfde10, "fstp", st6, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fstp, 0xdfdf10, "fstp", st7, xx, st0, xx, xx, mrm, x, END_LIST},
{OP_fnstsw, 0xdfe010, "fnstsw", ax, xx, xx, xx, xx, mrm, x, END_LIST}, /* e0 = [0x20] */ /*FIXME:w/ preceding fwait instr, this is "fstsw"*/
{INVALID, 0xdfe110, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe210, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe510, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfe710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_fucomip, 0xdfe810, "fucomip", xx, xx, st0, st0, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x29]}, /* e8 = [0x28] */
{OP_fucomip, 0xdfe910, "fucomip", xx, xx, st0, st1, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2a]},
{OP_fucomip, 0xdfea10, "fucomip", xx, xx, st0, st2, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2b]},
{OP_fucomip, 0xdfeb10, "fucomip", xx, xx, st0, st3, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2c]},
{OP_fucomip, 0xdfec10, "fucomip", xx, xx, st0, st4, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2d]},
{OP_fucomip, 0xdfed10, "fucomip", xx, xx, st0, st5, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2e]},
{OP_fucomip, 0xdfee10, "fucomip", xx, xx, st0, st6, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x2f]},
{OP_fucomip, 0xdfef10, "fucomip", xx, xx, st0, st7, xx, mrm, (fWC|fWP|fWZ), END_LIST},
{OP_fcomip, 0xdff010, "fcomip", xx, xx, st0, st0, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x31]}, /* f0 = [0x30] */
{OP_fcomip, 0xdff110, "fcomip", xx, xx, st0, st1, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x32]},
{OP_fcomip, 0xdff210, "fcomip", xx, xx, st0, st2, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x33]},
{OP_fcomip, 0xdff310, "fcomip", xx, xx, st0, st3, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x34]},
{OP_fcomip, 0xdff410, "fcomip", xx, xx, st0, st4, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x35]},
{OP_fcomip, 0xdff510, "fcomip", xx, xx, st0, st5, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x36]},
{OP_fcomip, 0xdff610, "fcomip", xx, xx, st0, st6, xx, mrm, (fWC|fWP|fWZ), tfh[7][0x37]},
{OP_fcomip, 0xdff710, "fcomip", xx, xx, st0, st7, xx, mrm, (fWC|fWP|fWZ), END_LIST},
{INVALID, 0xdff810, "(bad)", xx, xx, xx, xx, xx, no, x, NA}, /* f8 = [0x38] */
{INVALID, 0xdff910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdffa10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdffb10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdffc10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdffd10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdffe10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xdfff10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
/****************************************************************************
* Suffix extensions: 3DNow! and 3DNow!+
* Since there are only 24 of them, we save space by having a
* table of 256 indices instead of 256 instr_info_t structs.
*/
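/* Example lookup (illustrative, derived from the tables below): 3DNow!
 * instructions are encoded as 0f 0f <modrm> <suffix byte>; for suffix byte
 * 0x0d, suffix_index[0x0d] == 18, and suffix_extensions[18] is OP_pi2fd
 * (opcode field 0x0d0f0f90).
 */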
const byte suffix_index[256] = {
/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20,18, 0, 0, /* 0 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21,19, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,22, 0, 0, 0,23, 0, /* 8 */
4, 0, 0, 0, 7, 0,10,13, 0, 0,16, 0, 0, 0, 2, 0, /* 9 */
5, 0, 0, 0, 8, 0,11,14, 0, 0,17, 0, 0, 0, 3, 0, /* A */
6, 0, 0, 0, 9, 0,12,15, 0, 0, 0,24, 0, 0, 0, 1, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
const instr_info_t suffix_extensions[] = {
/* Rather than forging an exception let's anticipate future additions: we know
* (pretty sure anyway) that they'll have the same length and operand structure.
* Won't encode properly from Level 4 but that's ok.
*/
{OP_unknown_3dnow, 0x000f0f90, "unknown 3DNow",
Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 0*/
{OP_pavgusb , 0xbf0f0f90, "pavgusb", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 1*/
{OP_pfadd , 0x9e0f0f90, "pfadd", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 2*/
{OP_pfacc , 0xae0f0f90, "pfacc", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 3*/
{OP_pfcmpge , 0x900f0f90, "pfcmpge", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 4*/
{OP_pfcmpgt , 0xa00f0f90, "pfcmpgt", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 5*/
{OP_pfcmpeq , 0xb00f0f90, "pfcmpeq", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 6*/
{OP_pfmin , 0x940f0f90, "pfmin" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 7*/
{OP_pfmax , 0xa40f0f90, "pfmax" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 8*/
{OP_pfmul , 0xb40f0f90, "pfmul" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/* 9*/
{OP_pfrcp , 0x960f0f90, "pfrcp" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*10*/
{OP_pfrcpit1, 0xa60f0f90, "pfrcpit1", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*11*/
{OP_pfrcpit2, 0xb60f0f90, "pfrcpit2", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*12*/
{OP_pfrsqrt , 0x970f0f90, "pfrsqrt", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*13*/
{OP_pfrsqit1, 0xa70f0f90, "pfrsqit1", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*14*/
{OP_pmulhrw , 0xb70f0f90, "pmulhrw", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*15*/
{OP_pfsub , 0x9a0f0f90, "pfsub" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*16*/
{OP_pfsubr , 0xaa0f0f90, "pfsubr" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*17*/
{OP_pi2fd , 0x0d0f0f90, "pi2fd" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*18*/
{OP_pf2id , 0x1d0f0f90, "pf2id", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*19*/
{OP_pi2fw , 0x0c0f0f90, "pi2fw" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*20*/
{OP_pf2iw , 0x1c0f0f90, "pf2iw", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*21*/
{OP_pfnacc , 0x8a0f0f90, "pfnacc" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*22*/
{OP_pfpnacc , 0x8e0f0f90, "pfpnacc", Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*23*/
{OP_pswapd , 0xbb0f0f90, "pswapd" , Pq, xx, Qq, Pq, xx, mrm, x, END_LIST},/*24*/
};
/****************************************************************************
* To handle more than 2 dests or 3 sources we chain on extra instructions.
* All cases where we have extra operands are single-encoding-only instructions,
* so we use the list field to point to here.
* N.B.: the size of this table is hardcoded in decode.c.
* Also, only implicit operands are in these instruction extensions!!!
*/
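/* Example chain (illustrative): pusha names more implicit register operands
 * than a single instr_info_t can hold, so its main entry (elsewhere in these
 * tables) presumably chains to exop[0x00] below, which adds eCX/eDX/eBP and
 * in turn chains to exop[0x01] for eSI/eDI before terminating with END_LIST.
 */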
const instr_info_t extra_operands[] =
{
/* 0x00 */
{OP_CONTD, 0x000000, "<pusha cont'd>", xx, xx, eCX, eDX, eBP, xop, x, exop[0x01]},
{OP_CONTD, 0x000000, "<pusha cont'd>", xx, xx, eSI, eDI, xx, no, x, END_LIST},
/* 0x02 */
{OP_CONTD, 0x000000, "<popa cont'd>", eBX, eCX, xx, xx, xx, xop, x, exop[0x03]},
{OP_CONTD, 0x000000, "<popa cont'd>", eDX, eBP, xx, xx, xx, xop, x, exop[0x04]},
{OP_CONTD, 0x000000, "<popa cont'd>", eSI, eDI, xx, xx, xx, no, x, END_LIST},
/* 0x05 */
{OP_CONTD, 0x000000, "<enter cont'd>", xbp, xx, xbp, xx, xx, no, x, END_LIST},
/* 0x06 */
{OP_CONTD, 0x000000, "<cpuid cont'd>", ecx, edx, xx, xx, xx, no, x, END_LIST},
/* 0x07 */
{OP_CONTD, 0x000000, "<cmpxchg8b cont'd>", eDX, xx, eCX, eBX, xx, mrm, fWZ, END_LIST},
{OP_CONTD,0x663a6018, "<pcmpestrm cont'd", xx, xx, eax, edx, xx, mrm|reqp, fW6, END_LIST},
{OP_CONTD,0x663a6018, "<pcmpestri cont'd", xx, xx, eax, edx, xx, mrm|reqp, fW6, END_LIST},
/* 10 */
{OP_CONTD,0xf90f0177, "<rdtscp cont'd>", ecx, xx, xx, xx, xx, mrm, x, END_LIST},
{OP_CONTD,0x663a6018, "<vpcmpestrm cont'd", xx, xx, eax, edx, xx, mrm|vex|reqp, fW6, END_LIST},
{OP_CONTD,0x663a6018, "<vpcmpestri cont'd", xx, xx, eax, edx, xx, mrm|vex|reqp, fW6, END_LIST},
{OP_CONTD,0x0f3710, "<getsec cont'd", ecx, xx, xx, xx, xx, predcx, x, END_LIST},
};
/* clang-format on */
| 1 | 16,883 | wrong code: should be `Wh_e` | DynamoRIO-dynamorio | c |
@@ -582,10 +582,11 @@ static void makeLabelTag (vString *const label)
static lineType getLineType (void)
{
- vString *label = vStringNew ();
+ static vString *label = NULL;
int column = 0;
lineType type = LTYPE_UNDETERMINED;
+ label = vStringNewOrClear (label);
do /* read in first 6 "margin" characters */
{
int c = getcFromInputFile (); | 1 | /*
* Copyright (c) 1998-2003, Darren Hiebert
*
* This source code is released for free distribution under the terms of the
* GNU General Public License version 2 or (at your option) any later version.
*
* This module contains functions for generating tags for Fortran language
* files.
*/
/*
* INCLUDE FILES
*/
#include "general.h" /* must always come first */
#include <string.h>
#include <limits.h>
#include <ctype.h> /* to define tolower () */
#include "debug.h"
#include "entry.h"
#include "keyword.h"
#include "options.h"
#include "parse.h"
#include "read.h"
#include "routines.h"
#include "vstring.h"
#include "xtag.h"
/*
* MACROS
*/
#define isident(c) (isalnum(c) || (c) == '_')
#define isBlank(c) (bool) (c == ' ' || c == '\t')
#define isType(token,t) (bool) ((token)->type == (t))
#define isKeyword(token,k) (bool) ((token)->keyword == (k))
#define isSecondaryKeyword(token,k) (bool) ((token)->secondary == NULL ? \
false : (token)->secondary->keyword == (k))
/*
* DATA DECLARATIONS
*/
/* Used to designate type of line read in fixed source form.
*/
typedef enum eFortranLineType {
LTYPE_UNDETERMINED,
LTYPE_INVALID,
LTYPE_COMMENT,
LTYPE_CONTINUATION,
LTYPE_EOF,
LTYPE_INITIAL,
LTYPE_SHORT
} lineType;
/* Used to specify type of keyword.
*/
enum eKeywordId {
KEYWORD_abstract,
KEYWORD_allocatable,
KEYWORD_assignment,
KEYWORD_associate,
KEYWORD_automatic,
KEYWORD_bind,
KEYWORD_block,
KEYWORD_byte,
KEYWORD_cexternal,
KEYWORD_cglobal,
KEYWORD_class,
KEYWORD_character,
KEYWORD_codimension,
KEYWORD_common,
KEYWORD_complex,
KEYWORD_contains,
KEYWORD_data,
KEYWORD_deferred,
KEYWORD_dimension,
KEYWORD_dllexport,
KEYWORD_dllimport,
KEYWORD_do,
KEYWORD_double,
KEYWORD_elemental,
KEYWORD_end,
KEYWORD_entry,
KEYWORD_enum,
KEYWORD_enumerator,
KEYWORD_equivalence,
KEYWORD_extends,
KEYWORD_external,
KEYWORD_final,
KEYWORD_forall,
KEYWORD_format,
KEYWORD_function,
KEYWORD_generic,
KEYWORD_if,
KEYWORD_implicit,
KEYWORD_import,
KEYWORD_include,
KEYWORD_inline,
KEYWORD_integer,
KEYWORD_intent,
KEYWORD_interface,
KEYWORD_intrinsic,
KEYWORD_logical,
KEYWORD_map,
KEYWORD_module,
KEYWORD_namelist,
KEYWORD_non_overridable,
KEYWORD_nopass,
KEYWORD_operator,
KEYWORD_optional,
KEYWORD_parameter,
KEYWORD_pascal,
KEYWORD_pass,
KEYWORD_pexternal,
KEYWORD_pglobal,
KEYWORD_pointer,
KEYWORD_precision,
KEYWORD_private,
KEYWORD_procedure,
KEYWORD_program,
KEYWORD_public,
KEYWORD_pure,
KEYWORD_real,
KEYWORD_record,
KEYWORD_recursive,
KEYWORD_save,
KEYWORD_select,
KEYWORD_sequence,
KEYWORD_static,
KEYWORD_stdcall,
KEYWORD_structure,
KEYWORD_subroutine,
KEYWORD_target,
KEYWORD_then,
KEYWORD_type,
KEYWORD_union,
KEYWORD_use,
KEYWORD_value,
KEYWORD_virtual,
KEYWORD_volatile,
KEYWORD_where,
KEYWORD_while
};
typedef int keywordId; /* to allow KEYWORD_NONE */
typedef enum eTokenType {
TOKEN_UNDEFINED,
TOKEN_EOF,
TOKEN_COMMA,
TOKEN_DOUBLE_COLON,
TOKEN_IDENTIFIER,
TOKEN_KEYWORD,
TOKEN_LABEL,
TOKEN_NUMERIC,
TOKEN_OPERATOR,
TOKEN_PAREN_CLOSE,
TOKEN_PAREN_OPEN,
TOKEN_SQUARE_OPEN,
TOKEN_SQUARE_CLOSE,
TOKEN_PERCENT,
TOKEN_STATEMENT_END,
TOKEN_STRING
} tokenType;
typedef enum eTagType {
TAG_UNDEFINED = -1,
TAG_BLOCK_DATA,
TAG_COMMON_BLOCK,
TAG_ENTRY_POINT,
TAG_ENUM,
TAG_FUNCTION,
TAG_INTERFACE,
TAG_COMPONENT,
TAG_LABEL,
TAG_LOCAL,
TAG_MODULE,
TAG_METHOD,
TAG_NAMELIST,
TAG_ENUMERATOR,
TAG_PROGRAM,
TAG_PROTOTYPE,
TAG_SUBROUTINE,
TAG_DERIVED_TYPE,
TAG_VARIABLE,
TAG_COUNT /* must be last */
} tagType;
typedef enum eImplementation {
IMP_DEFAULT,
IMP_ABSTRACT,
IMP_DEFERRED,
IMP_NON_OVERRIDABLE,
IMP_COUNT
} impType;
typedef struct sTokenInfo {
tokenType type;
keywordId keyword;
tagType tag;
vString* string;
vString* parentType;
vString* signature;
impType implementation;
bool isMethod;
struct sTokenInfo *secondary;
unsigned long lineNumber;
MIOPos filePosition;
} tokenInfo;
/*
* DATA DEFINITIONS
*/
static langType Lang_fortran;
static int Ungetc;
static unsigned int Column;
static bool FreeSourceForm;
static bool FreeSourceFormFound = false;
static bool ParsingString;
/* indexed by tagType */
static kindOption FortranKinds [] = {
{ true, 'b', "blockData", "block data"},
{ true, 'c', "common", "common blocks"},
{ true, 'e', "entry", "entry points"},
{ true, 'E', "enum", "enumerations"},
{ true, 'f', "function", "functions"},
{ true, 'i', "interface", "interface contents, generic names, and operators"},
{ true, 'k', "component", "type and structure components"},
{ true, 'l', "label", "labels"},
{ false, 'L', "local", "local, common block, and namelist variables"},
{ true, 'm', "module", "modules"},
{ true, 'M', "method", "type bound procedures"},
{ true, 'n', "namelist", "namelists"},
{ true, 'N', "enumerator", "enumeration values"},
{ true, 'p', "program", "programs"},
{ false, 'P', "prototype", "subprogram prototypes"},
{ true, 's', "subroutine", "subroutines"},
{ true, 't', "type", "derived types and structures"},
{ true, 'v', "variable", "program (global) and module variables"}
};
/* For definitions of Fortran 77 with extensions:
* http://www.fortran.com/fortran/F77_std/rjcnf0001.html
* http://scienide.uwaterloo.ca/MIPSpro7/007-2362-004/sgi_html/index.html
*
* For the Compaq Fortran Reference Manual:
* http://h18009.www1.hp.com/fortran/docs/lrm/dflrm.htm
*/
static const keywordTable FortranKeywordTable [] = {
/* keyword keyword ID */
{ "abstract", KEYWORD_abstract },
{ "allocatable", KEYWORD_allocatable },
{ "assignment", KEYWORD_assignment },
{ "associate", KEYWORD_associate },
{ "automatic", KEYWORD_automatic },
{ "bind", KEYWORD_bind },
{ "block", KEYWORD_block },
{ "byte", KEYWORD_byte },
{ "cexternal", KEYWORD_cexternal },
{ "cglobal", KEYWORD_cglobal },
{ "class", KEYWORD_class },
{ "character", KEYWORD_character },
{ "codimension", KEYWORD_codimension },
{ "common", KEYWORD_common },
{ "complex", KEYWORD_complex },
{ "contains", KEYWORD_contains },
{ "data", KEYWORD_data },
{ "deferred", KEYWORD_deferred },
{ "dimension", KEYWORD_dimension },
{ "dll_export", KEYWORD_dllexport },
{ "dll_import", KEYWORD_dllimport },
{ "do", KEYWORD_do },
{ "double", KEYWORD_double },
{ "elemental", KEYWORD_elemental },
{ "end", KEYWORD_end },
{ "entry", KEYWORD_entry },
{ "enum", KEYWORD_enum },
{ "enumerator", KEYWORD_enumerator },
{ "equivalence", KEYWORD_equivalence },
{ "extends", KEYWORD_extends },
{ "external", KEYWORD_external },
{ "final", KEYWORD_final },
{ "forall", KEYWORD_forall },
{ "format", KEYWORD_format },
{ "function", KEYWORD_function },
{ "generic", KEYWORD_generic },
{ "if", KEYWORD_if },
{ "implicit", KEYWORD_implicit },
{ "import", KEYWORD_import },
{ "include", KEYWORD_include },
{ "inline", KEYWORD_inline },
{ "integer", KEYWORD_integer },
{ "intent", KEYWORD_intent },
{ "interface", KEYWORD_interface },
{ "intrinsic", KEYWORD_intrinsic },
{ "logical", KEYWORD_logical },
{ "map", KEYWORD_map },
{ "module", KEYWORD_module },
{ "namelist", KEYWORD_namelist },
{ "non_overridable", KEYWORD_non_overridable },
{ "nopass", KEYWORD_nopass },
{ "operator", KEYWORD_operator },
{ "optional", KEYWORD_optional },
{ "parameter", KEYWORD_parameter },
{ "pascal", KEYWORD_pascal },
{ "pass", KEYWORD_pass },
{ "pexternal", KEYWORD_pexternal },
{ "pglobal", KEYWORD_pglobal },
{ "pointer", KEYWORD_pointer },
{ "precision", KEYWORD_precision },
{ "private", KEYWORD_private },
{ "procedure", KEYWORD_procedure },
{ "program", KEYWORD_program },
{ "public", KEYWORD_public },
{ "pure", KEYWORD_pure },
{ "real", KEYWORD_real },
{ "record", KEYWORD_record },
{ "recursive", KEYWORD_recursive },
{ "save", KEYWORD_save },
{ "select", KEYWORD_select },
{ "sequence", KEYWORD_sequence },
{ "static", KEYWORD_static },
{ "stdcall", KEYWORD_stdcall },
{ "structure", KEYWORD_structure },
{ "subroutine", KEYWORD_subroutine },
{ "target", KEYWORD_target },
{ "then", KEYWORD_then },
{ "type", KEYWORD_type },
{ "union", KEYWORD_union },
{ "use", KEYWORD_use },
{ "value", KEYWORD_value },
{ "virtual", KEYWORD_virtual },
{ "volatile", KEYWORD_volatile },
{ "where", KEYWORD_where },
{ "while", KEYWORD_while }
};
static struct {
unsigned int count;
unsigned int max;
tokenInfo* list;
} Ancestors = { 0, 0, NULL };
/*
* FUNCTION PROTOTYPES
*/
static void parseStructureStmt (tokenInfo *const token);
static void parseUnionStmt (tokenInfo *const token);
static void parseDerivedTypeDef (tokenInfo *const token);
static void parseSubprogram (tokenInfo *const token);
/*
* FUNCTION DEFINITIONS
*/
static void ancestorPush (tokenInfo *const token)
{
enum { incrementalIncrease = 10 };
if (Ancestors.list == NULL)
{
Assert (Ancestors.max == 0);
Ancestors.count = 0;
Ancestors.max = incrementalIncrease;
Ancestors.list = xMalloc (Ancestors.max, tokenInfo);
}
else if (Ancestors.count == Ancestors.max)
{
Ancestors.max += incrementalIncrease;
Ancestors.list = xRealloc (Ancestors.list, Ancestors.max, tokenInfo);
}
Ancestors.list [Ancestors.count] = *token;
Ancestors.list [Ancestors.count].string = vStringNewCopy (token->string);
Ancestors.list [Ancestors.count].signature = token->signature? vStringNewCopy (token->signature): NULL;
Ancestors.count++;
}
static void ancestorPop (void)
{
Assert (Ancestors.count > 0);
--Ancestors.count;
vStringDelete (Ancestors.list [Ancestors.count].string);
vStringDelete (Ancestors.list [Ancestors.count].signature);
Ancestors.list [Ancestors.count].type = TOKEN_UNDEFINED;
Ancestors.list [Ancestors.count].keyword = KEYWORD_NONE;
Ancestors.list [Ancestors.count].secondary = NULL;
Ancestors.list [Ancestors.count].tag = TAG_UNDEFINED;
Ancestors.list [Ancestors.count].string = NULL;
Ancestors.list [Ancestors.count].lineNumber = 0L;
Ancestors.list [Ancestors.count].implementation = IMP_DEFAULT;
Ancestors.list [Ancestors.count].isMethod = false;
}
static const tokenInfo* ancestorScope (void)
{
tokenInfo *result = NULL;
unsigned int i;
for (i = Ancestors.count ; i > 0 && result == NULL ; --i)
{
tokenInfo *const token = Ancestors.list + i - 1;
if (token->type == TOKEN_IDENTIFIER &&
token->tag != TAG_UNDEFINED && token->tag != TAG_INTERFACE &&
token->tag != TAG_ENUM)
result = token;
}
return result;
}
static const tokenInfo* ancestorTop (void)
{
Assert (Ancestors.count > 0);
return &Ancestors.list [Ancestors.count - 1];
}
#define ancestorCount() (Ancestors.count)
static void ancestorClear (void)
{
while (Ancestors.count > 0)
ancestorPop ();
if (Ancestors.list != NULL)
eFree (Ancestors.list);
Ancestors.list = NULL;
Ancestors.count = 0;
Ancestors.max = 0;
}
static bool insideInterface (void)
{
bool result = false;
unsigned int i;
for (i = 0 ; i < Ancestors.count && !result ; ++i)
{
if (Ancestors.list [i].tag == TAG_INTERFACE)
result = true;
}
return result;
}
/*
* Tag generation functions
*/
static tokenInfo *newToken (void)
{
tokenInfo *const token = xMalloc (1, tokenInfo);
token->type = TOKEN_UNDEFINED;
token->keyword = KEYWORD_NONE;
token->tag = TAG_UNDEFINED;
token->string = vStringNew ();
token->secondary = NULL;
token->parentType = NULL;
token->signature = NULL;
token->implementation = IMP_DEFAULT;
token->isMethod = false;
token->lineNumber = getInputLineNumber ();
token->filePosition = getInputFilePosition ();
return token;
}
static tokenInfo *newTokenFrom (tokenInfo *const token)
{
tokenInfo *result = xMalloc (1, tokenInfo);
*result = *token;
result->string = vStringNewCopy (token->string);
token->secondary = NULL;
token->parentType = NULL;
token->signature = NULL;
return result;
}
static void deleteToken (tokenInfo *const token)
{
if (token != NULL)
{
vStringDelete (token->string);
vStringDelete (token->parentType);
vStringDelete (token->signature);
deleteToken (token->secondary);
token->secondary = NULL;
eFree (token);
}
}
static bool isFileScope (const tagType type)
{
return (bool) (type == TAG_LABEL || type == TAG_LOCAL);
}
static bool includeTag (const tagType type)
{
bool include;
Assert (type != TAG_UNDEFINED);
include = FortranKinds [(int) type].enabled;
if (include && isFileScope (type))
include = isXtagEnabled(XTAG_FILE_SCOPE);
return include;
}
static const char *implementationString (const impType imp)
{
static const char *const names [] ={
"?", "abstract", "deferred", "non_overridable"
};
Assert (ARRAY_SIZE (names) == IMP_COUNT);
Assert ((int) imp < IMP_COUNT);
return names [(int) imp];
}
static void makeFortranTag (tokenInfo *const token, tagType tag)
{
token->tag = tag;
if (includeTag (token->tag))
{
const char *const name = vStringValue (token->string);
tagEntryInfo e;
initTagEntry (&e, name, &(FortranKinds [token->tag]));
if (token->tag == TAG_COMMON_BLOCK)
e.lineNumberEntry = (bool) (Option.locate != EX_PATTERN);
e.lineNumber = token->lineNumber;
e.filePosition = token->filePosition;
e.isFileScope = isFileScope (token->tag);
if (e.isFileScope)
markTagExtraBit (&e, XTAG_FILE_SCOPE);
e.truncateLine = (bool) (token->tag != TAG_LABEL);
if (ancestorCount () > 0)
{
const tokenInfo* const scope = ancestorScope ();
if (scope != NULL)
{
e.extensionFields.scopeKind = &(FortranKinds [scope->tag]);
e.extensionFields.scopeName = vStringValue (scope->string);
}
}
if (token->parentType != NULL &&
vStringLength (token->parentType) > 0 &&
token->tag == TAG_DERIVED_TYPE)
e.extensionFields.inheritance = vStringValue (token->parentType);
if (token->implementation != IMP_DEFAULT)
e.extensionFields.implementation =
implementationString (token->implementation);
if (token->signature &&
vStringLength (token->signature) > 0 &&
(token->tag == TAG_FUNCTION ||
token->tag == TAG_SUBROUTINE ||
token->tag == TAG_PROTOTYPE))
e.extensionFields.signature = vStringValue (token->signature);
makeTagEntry (&e);
}
}
/*
* Parsing functions
*/
static int skipLine (void)
{
int c;
do
c = getcFromInputFile ();
while (c != EOF && c != '\n');
return c;
}
static void makeLabelTag (vString *const label)
{
tokenInfo *token = newToken ();
token->type = TOKEN_LABEL;
vStringCopy (token->string, label);
makeFortranTag (token, TAG_LABEL);
deleteToken (token);
}
static lineType getLineType (void)
{
vString *label = vStringNew ();
int column = 0;
lineType type = LTYPE_UNDETERMINED;
do /* read in first 6 "margin" characters */
{
int c = getcFromInputFile ();
/* 3.2.1 Comment_Line. A comment line is any line that contains
* a C or an asterisk in column 1, or contains only blank characters
* in columns 1 through 72. A comment line that contains a C or
* an asterisk in column 1 may contain any character capable of
* representation in the processor in columns 2 through 72.
*/
/* EXCEPTION! Some compilers permit '!' as a comment character here.
*
* Treat # and $ in column 1 as comment to permit preprocessor directives.
* Treat D and d in column 1 as comment for HP debug statements.
*/
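		/* Illustrative fixed-form lines (not from the original source; the
		 * initial/continuation rules are spelled out below):
		 *   "C     whole-line comment"  -> LTYPE_COMMENT      ('C' in column 1)
		 *   "  100 x = x + 1"           -> LTYPE_INITIAL      (label 100, blank column 6)
		 *   "     &  + y"               -> LTYPE_CONTINUATION ('&' in column 6)
		 */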
if (column == 0 && strchr ("*Cc!#$Dd", c) != NULL)
type = LTYPE_COMMENT;
else if (c == '\t') /* EXCEPTION! Some compilers permit a tab here */
{
column = 8;
type = LTYPE_INITIAL;
}
else if (column == 5)
{
/* 3.2.2 Initial_Line. An initial line is any line that is not
* a comment line and contains the character blank or the digit 0
* in column 6. Columns 1 through 5 may contain a statement label
* (3.4), or each of the columns 1 through 5 must contain the
* character blank.
*/
if (c == ' ' || c == '0')
type = LTYPE_INITIAL;
/* 3.2.3 Continuation_Line. A continuation line is any line that
* contains any character of the FORTRAN character set other than
* the character blank or the digit 0 in column 6 and contains
* only blank characters in columns 1 through 5.
*/
else if (vStringLength (label) == 0)
type = LTYPE_CONTINUATION;
else
type = LTYPE_INVALID;
}
else if (c == ' ')
;
else if (c == EOF)
type = LTYPE_EOF;
else if (c == '\n')
type = LTYPE_SHORT;
else if (isdigit (c))
vStringPut (label, c);
else
type = LTYPE_INVALID;
++column;
} while (column < 6 && type == LTYPE_UNDETERMINED);
Assert (type != LTYPE_UNDETERMINED);
if (vStringLength (label) > 0)
makeLabelTag (label);
vStringDelete (label);
return type;
}
static int getFixedFormChar (void)
{
bool newline = false;
lineType type;
int c = '\0';
if (Column > 0)
{
#ifdef STRICT_FIXED_FORM
/* EXCEPTION! Some compilers permit more than 72 characters per line.
*/
if (Column > 71)
c = skipLine ();
else
#endif
{
c = getcFromInputFile ();
++Column;
}
if (c == '\n')
{
newline = true; /* need to check for continuation line */
Column = 0;
}
else if (c == '!' && ! ParsingString)
{
c = skipLine ();
newline = true; /* need to check for continuation line */
Column = 0;
}
else if (c == '&') /* check for free source form */
{
const int c2 = getcFromInputFile ();
if (c2 == '\n')
FreeSourceFormFound = true;
else
ungetcToInputFile (c2);
}
}
while (Column == 0)
{
type = getLineType ();
switch (type)
{
case LTYPE_UNDETERMINED:
case LTYPE_INVALID:
FreeSourceFormFound = true;
if (! FreeSourceForm)
return EOF;
case LTYPE_SHORT: break;
case LTYPE_COMMENT: skipLine (); break;
case LTYPE_EOF:
Column = 6;
if (newline)
c = '\n';
else
c = EOF;
break;
case LTYPE_INITIAL:
if (newline)
{
c = '\n';
Column = 6;
break;
}
/* fall through to next case */
case LTYPE_CONTINUATION:
Column = 5;
do
{
c = getcFromInputFile ();
++Column;
} while (isBlank (c));
if (c == '\n')
Column = 0;
else if (Column > 6)
{
ungetcToInputFile (c);
c = ' ';
}
break;
default:
Assert ("Unexpected line type" == NULL);
}
}
return c;
}
static int skipToNextLine (void)
{
int c = skipLine ();
if (c != EOF)
c = getcFromInputFile ();
return c;
}
static int getFreeFormChar (void)
{
static bool newline = true;
bool advanceLine = false;
int c = getcFromInputFile ();
/* If the last nonblank, non-comment character of a FORTRAN 90
* free-format text line is an ampersand then the next non-comment
* line is a continuation line.
*/
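	/* Illustrative free source form example (not from the original source):
	 *   total = a + b &   ! trailing '&' ...
	 *           + c       ! ... makes the next line a continuation
	 */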
if (c == '&')
{
do
c = getcFromInputFile ();
while (isspace (c) && c != '\n');
if (c == '\n')
{
newline = true;
advanceLine = true;
}
else if (c == '!')
advanceLine = true;
else
{
ungetcToInputFile (c);
c = '&';
}
}
else if (newline && (c == '!' || c == '#'))
advanceLine = true;
while (advanceLine)
{
while (isspace (c))
c = getcFromInputFile ();
if (c == '!' || (newline && c == '#'))
{
c = skipToNextLine ();
newline = true;
continue;
}
if (c == '&')
c = getcFromInputFile ();
else
advanceLine = false;
}
newline = (bool) (c == '\n');
return c;
}
static int getChar (void)
{
int c;
if (Ungetc != '\0')
{
c = Ungetc;
Ungetc = '\0';
}
else if (FreeSourceForm)
c = getFreeFormChar ();
else
c = getFixedFormChar ();
return c;
}
static void ungetChar (const int c)
{
Ungetc = c;
}
/* If a numeric is passed in 'c', this is used as the first digit of the
* numeric being parsed.
*/
static vString *parseInteger (int c)
{
vString *string = vStringNew ();
if (c == '-')
{
vStringPut (string, c);
c = getChar ();
}
else if (! isdigit (c))
c = getChar ();
while (c != EOF && isdigit (c))
{
vStringPut (string, c);
c = getChar ();
}
if (c == '_')
{
do
c = getChar ();
while (c != EOF && isalpha (c));
}
ungetChar (c);
return string;
}
static vString *parseNumeric (int c)
{
vString *string = parseInteger (c);
c = getChar ();
if (c == '.')
{
vString *integer = parseInteger ('\0');
vStringPut (string, c);
vStringCat (string, integer);
vStringDelete (integer);
c = getChar ();
}
if (tolower (c) == 'e')
{
vString *integer = parseInteger ('\0');
vStringPut (string, c);
vStringCat (string, integer);
vStringDelete (integer);
}
else
ungetChar (c);
return string;
}
static void parseString (vString *const string, const int delimiter)
{
const unsigned long inputLineNumber = getInputLineNumber ();
int c;
ParsingString = true;
c = getChar ();
while (c != delimiter && c != '\n' && c != EOF)
{
vStringPut (string, c);
c = getChar ();
}
if (c == '\n' || c == EOF)
{
verbose ("%s: unterminated character string at line %lu\n",
getInputFileName (), inputLineNumber);
if (c != EOF && ! FreeSourceForm)
FreeSourceFormFound = true;
}
ParsingString = false;
}
/* Read a C identifier beginning with "firstChar" and place it into "string".
*/
static void parseIdentifier (vString *const string, const int firstChar)
{
int c = firstChar;
do
{
vStringPut (string, c);
c = getChar ();
} while (isident (c));
ungetChar (c); /* unget non-identifier character */
}
static void checkForLabel (void)
{
tokenInfo* token = NULL;
int length;
int c;
do
c = getChar ();
while (isBlank (c));
for (length = 0 ; isdigit (c) && length < 5 ; ++length)
{
if (token == NULL)
{
token = newToken ();
token->type = TOKEN_LABEL;
}
vStringPut (token->string, c);
c = getChar ();
}
if (length > 0 && token != NULL)
{
makeFortranTag (token, TAG_LABEL);
deleteToken (token);
}
ungetChar (c);
}
static void readIdentifier (tokenInfo *const token, const int c)
{
parseIdentifier (token->string, c);
token->keyword = lookupCaseKeyword (vStringValue (token->string), Lang_fortran);
if (! isKeyword (token, KEYWORD_NONE))
token->type = TOKEN_KEYWORD;
else
{
token->type = TOKEN_IDENTIFIER;
if (strncmp (vStringValue (token->string), "end", 3) == 0)
{
vString *const sub = vStringNewInit (vStringValue (token->string) + 3);
const keywordId kw = lookupCaseKeyword (vStringValue (sub), Lang_fortran);
vStringDelete (sub);
if (kw != KEYWORD_NONE)
{
token->secondary = newToken ();
token->secondary->type = TOKEN_KEYWORD;
token->secondary->keyword = kw;
token->keyword = KEYWORD_end;
}
}
}
}
static void readToken (tokenInfo *const token)
{
int c;
deleteToken (token->secondary);
token->type = TOKEN_UNDEFINED;
token->tag = TAG_UNDEFINED;
token->keyword = KEYWORD_NONE;
token->secondary = NULL;
token->implementation = IMP_DEFAULT;
vStringClear (token->string);
vStringDelete (token->parentType);
vStringDelete (token->signature);
token->parentType = NULL;
token->isMethod = false;
token->signature = NULL;
getNextChar:
c = getChar ();
token->lineNumber = getInputLineNumber ();
token->filePosition = getInputFilePosition ();
switch (c)
{
case EOF: token->type = TOKEN_EOF; break;
case ' ': goto getNextChar;
case '\t': goto getNextChar;
case ',': token->type = TOKEN_COMMA; break;
case '(': token->type = TOKEN_PAREN_OPEN; break;
case ')': token->type = TOKEN_PAREN_CLOSE; break;
case '[': token->type = TOKEN_SQUARE_OPEN; break;
case ']': token->type = TOKEN_SQUARE_CLOSE; break;
case '%': token->type = TOKEN_PERCENT; break;
case '*':
case '/':
case '+':
case '-':
case '=':
case '<':
case '>':
{
const char *const operatorChars = "*/+=<>";
do {
vStringPut (token->string, c);
c = getChar ();
} while (strchr (operatorChars, c) != NULL);
ungetChar (c);
token->type = TOKEN_OPERATOR;
break;
}
case '!':
if (FreeSourceForm)
{
do
c = getChar ();
while (c != '\n' && c != EOF);
}
else
{
skipLine ();
Column = 0;
}
/* fall through to newline case */
case '\n':
token->type = TOKEN_STATEMENT_END;
if (FreeSourceForm)
checkForLabel ();
break;
case '.':
parseIdentifier (token->string, c);
c = getChar ();
if (c == '.')
{
vStringPut (token->string, c);
token->type = TOKEN_OPERATOR;
}
else
{
ungetChar (c);
token->type = TOKEN_UNDEFINED;
}
break;
case '"':
case '\'':
parseString (token->string, c);
token->type = TOKEN_STRING;
break;
case ';':
token->type = TOKEN_STATEMENT_END;
break;
case ':':
c = getChar ();
if (c == ':')
token->type = TOKEN_DOUBLE_COLON;
else
{
ungetChar (c);
token->type = TOKEN_UNDEFINED;
}
break;
default:
if (isalpha (c))
readIdentifier (token, c);
else if (isdigit (c))
{
vString *numeric = parseNumeric (c);
vStringCat (token->string, numeric);
vStringDelete (numeric);
token->type = TOKEN_NUMERIC;
}
else
token->type = TOKEN_UNDEFINED;
break;
}
}
static void readSubToken (tokenInfo *const token)
{
if (token->secondary == NULL)
{
token->secondary = newToken ();
readToken (token->secondary);
}
}
/*
* Scanning functions
*/
static void skipToToken (tokenInfo *const token, tokenType type)
{
while (! isType (token, type) && ! isType (token, TOKEN_STATEMENT_END) &&
!(token->secondary != NULL && isType (token->secondary, TOKEN_STATEMENT_END)) &&
! isType (token, TOKEN_EOF))
readToken (token);
}
static void skipPast (tokenInfo *const token, tokenType type)
{
skipToToken (token, type);
if (! isType (token, TOKEN_STATEMENT_END))
readToken (token);
}
static void skipToNextStatement (tokenInfo *const token)
{
do
{
skipToToken (token, TOKEN_STATEMENT_END);
readToken (token);
} while (isType (token, TOKEN_STATEMENT_END));
}
/* skip over paired tokens, managing nested pairs and stopping at statement end
* or right after closing token, whatever comes first.
*/
static void skipOverPairsFull (tokenInfo *const token,
tokenType topen,
tokenType tclose,
void (* token_cb) (tokenInfo *const, void *),
void *user_data)
{
int level = 0;
do {
if (isType (token, TOKEN_STATEMENT_END))
break;
else if (isType (token, topen))
++level;
else if (isType (token, tclose))
--level;
else if (token_cb)
token_cb (token, user_data);
readToken (token);
} while (level > 0 && !isType (token, TOKEN_EOF));
}
static void skipOverParensFull (tokenInfo *const token,
void (* token_cb) (tokenInfo *const, void *),
void *user_data)
{
skipOverPairsFull (token, TOKEN_PAREN_OPEN,
TOKEN_PAREN_CLOSE,
token_cb, user_data);
}
static void skipOverSquaresFull (tokenInfo *const token,
void (* token_cb) (tokenInfo *const, void *),
void *user_data)
{
skipOverPairsFull (token, TOKEN_SQUARE_OPEN,
TOKEN_SQUARE_CLOSE,
token_cb, user_data);
}
static void skipOverParens (tokenInfo *const token)
{
skipOverParensFull (token, NULL, NULL);
}
static void skipOverSqaures (tokenInfo *const token)
{
skipOverSquaresFull (token, NULL, NULL);
}
static bool isTypeSpec (tokenInfo *const token)
{
bool result;
switch (token->keyword)
{
case KEYWORD_byte:
case KEYWORD_integer:
case KEYWORD_real:
case KEYWORD_double:
case KEYWORD_complex:
case KEYWORD_character:
case KEYWORD_logical:
case KEYWORD_record:
case KEYWORD_type:
case KEYWORD_procedure:
case KEYWORD_final:
case KEYWORD_generic:
case KEYWORD_class:
case KEYWORD_enumerator:
result = true;
break;
default:
result = false;
break;
}
return result;
}
static bool isSubprogramPrefix (tokenInfo *const token)
{
bool result;
switch (token->keyword)
{
case KEYWORD_elemental:
case KEYWORD_pure:
case KEYWORD_recursive:
case KEYWORD_stdcall:
result = true;
break;
default:
result = false;
break;
}
return result;
}
static void parseKindSelector (tokenInfo *const token)
{
if (isType (token, TOKEN_PAREN_OPEN))
skipOverParens (token); /* skip kind-selector */
if (isType (token, TOKEN_OPERATOR) &&
strcmp (vStringValue (token->string), "*") == 0)
{
readToken (token);
if (isType (token, TOKEN_PAREN_OPEN))
skipOverParens (token);
else
readToken (token);
}
}
/* type-spec
* is INTEGER [kind-selector]
* or REAL [kind-selector] is ( etc. )
* or DOUBLE PRECISION
* or COMPLEX [kind-selector]
* or CHARACTER [kind-selector]
* or LOGICAL [kind-selector]
* or TYPE ( type-name )
*
* Note that INTEGER and REAL may be followed by "*N" where "N" is an integer
*/
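/* Illustrative declarations covered by this grammar (identifiers made up):
 *     INTEGER*4 count
 *     CHARACTER*(*) name
 *     DOUBLE PRECISION x
 *     TYPE (point) p
 */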
static void parseTypeSpec (tokenInfo *const token)
{
/* parse type-spec, leaving `token' at first token following type-spec */
Assert (isTypeSpec (token));
switch (token->keyword)
{
case KEYWORD_character:
/* skip char-selector */
readToken (token);
if (isType (token, TOKEN_OPERATOR) &&
strcmp (vStringValue (token->string), "*") == 0)
readToken (token);
if (isType (token, TOKEN_PAREN_OPEN))
skipOverParens (token);
else if (isType (token, TOKEN_NUMERIC))
readToken (token);
break;
case KEYWORD_byte:
case KEYWORD_complex:
case KEYWORD_integer:
case KEYWORD_logical:
case KEYWORD_real:
case KEYWORD_procedure:
case KEYWORD_class:
readToken (token);
parseKindSelector (token);
break;
case KEYWORD_double:
readToken (token);
if (isKeyword (token, KEYWORD_complex) ||
isKeyword (token, KEYWORD_precision))
readToken (token);
else
skipToToken (token, TOKEN_STATEMENT_END);
break;
case KEYWORD_record:
readToken (token);
if (isType (token, TOKEN_OPERATOR) &&
strcmp (vStringValue (token->string), "/") == 0)
{
readToken (token); /* skip to structure name */
readToken (token); /* skip to '/' */
readToken (token); /* skip to variable name */
}
break;
case KEYWORD_type:
readToken (token);
if (isType (token, TOKEN_PAREN_OPEN))
skipOverParens (token); /* skip type-name */
else
parseDerivedTypeDef (token);
break;
case KEYWORD_final:
case KEYWORD_generic:
case KEYWORD_enumerator:
readToken (token);
break;
default:
skipToToken (token, TOKEN_STATEMENT_END);
break;
}
}
static bool skipStatementIfKeyword (tokenInfo *const token, keywordId keyword)
{
bool result = false;
if (isKeyword (token, keyword))
{
result = true;
skipToNextStatement (token);
}
return result;
}
/* parse extends qualifier, leaving token at first token following close
* parenthesis.
*/
static void makeParentType (tokenInfo *const token, void *userData)
{
if (((tokenInfo *)userData)->parentType)
vStringDelete (((tokenInfo *)userData)->parentType);
((tokenInfo *)userData)->parentType = vStringNewCopy (token->string);
}
static void parseExtendsQualifier (tokenInfo *const token,
tokenInfo *const qualifierToken)
{
skipOverParensFull (token, makeParentType, qualifierToken);
}
static void parseAbstractQualifier (tokenInfo *const token,
tokenInfo *const qualifierToken)
{
Assert (isKeyword (token, KEYWORD_abstract));
qualifierToken->implementation = IMP_ABSTRACT;
readToken (token);
}
static void parseDeferredQualifier (tokenInfo *const token,
tokenInfo *const qualifierToken)
{
Assert (isKeyword (token, KEYWORD_deferred));
qualifierToken->implementation = IMP_DEFERRED;
readToken (token);
}
static void parseNonOverridableQualifier (tokenInfo *const token,
tokenInfo *const qualifierToken)
{
Assert (isKeyword (token, KEYWORD_non_overridable));
qualifierToken->implementation = IMP_NON_OVERRIDABLE;
readToken (token);
}
/* parse a list of qualifying specifiers, leaving `token' at first token
* following list. Examples of such specifiers are:
* [[, attr-spec] ::]
* [[, component-attr-spec-list] ::]
*
* attr-spec
* is PARAMETER
* or access-spec (is PUBLIC or PRIVATE)
* or ALLOCATABLE
* or DIMENSION ( array-spec )
* or EXTENDS ( extends-spec )
* or EXTERNAL
* or INTENT ( intent-spec )
* or INTRINSIC
* or OPTIONAL
* or POINTER
* or SAVE
* or TARGET
* or PASS
* or NOPASS
* or DEFERRED
* or NON_OVERRIDABLE
* or ABSTRACT
*
* component-attr-spec
* is POINTER
* or DIMENSION ( component-array-spec )
*/
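/* Illustrative qualifier lists (identifiers made up):
 *     REAL, DIMENSION(3), TARGET :: v
 *     PROCEDURE(area_iface), PASS, DEFERRED :: area
 */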
static tokenInfo *parseQualifierSpecList (tokenInfo *const token)
{
tokenInfo *qualifierToken = newToken ();
do
{
readToken (token); /* should be an attr-spec */
switch (token->keyword)
{
case KEYWORD_parameter:
case KEYWORD_allocatable:
case KEYWORD_external:
case KEYWORD_intrinsic:
case KEYWORD_optional:
case KEYWORD_private:
case KEYWORD_pointer:
case KEYWORD_public:
case KEYWORD_save:
case KEYWORD_target:
case KEYWORD_nopass:
readToken (token);
break;
case KEYWORD_dimension:
case KEYWORD_intent:
case KEYWORD_bind:
readToken (token);
skipOverParens (token);
break;
case KEYWORD_extends:
readToken (token);
parseExtendsQualifier (token, qualifierToken);
break;
case KEYWORD_pass:
readToken (token);
if (isType (token, TOKEN_PAREN_OPEN))
skipOverParens (token);
break;
case KEYWORD_abstract:
parseAbstractQualifier (token, qualifierToken);
break;
case KEYWORD_deferred:
parseDeferredQualifier (token, qualifierToken);
break;
case KEYWORD_non_overridable:
parseNonOverridableQualifier (token, qualifierToken);
break;
case KEYWORD_codimension:
readToken (token);
skipOverSqaures (token);
break;
default: skipToToken (token, TOKEN_STATEMENT_END); break;
}
} while (isType (token, TOKEN_COMMA));
if (! isType (token, TOKEN_DOUBLE_COLON))
skipToToken (token, TOKEN_STATEMENT_END);
return qualifierToken;
}
static tagType variableTagType (tokenInfo *const st)
{
tagType result = TAG_VARIABLE;
if (ancestorCount () > 0)
{
const tokenInfo* const parent = ancestorTop ();
switch (parent->tag)
{
case TAG_MODULE: result = TAG_VARIABLE; break;
case TAG_DERIVED_TYPE:
if (st && st->isMethod)
result = TAG_METHOD;
else
result = TAG_COMPONENT;
break;
case TAG_FUNCTION: result = TAG_LOCAL; break;
case TAG_SUBROUTINE: result = TAG_LOCAL; break;
case TAG_PROTOTYPE: result = TAG_LOCAL; break;
case TAG_ENUM: result = TAG_ENUMERATOR; break;
default: result = TAG_VARIABLE; break;
}
}
return result;
}
static void parseEntityDecl (tokenInfo *const token,
tokenInfo *const st)
{
Assert (isType (token, TOKEN_IDENTIFIER));
if (st && st->implementation != IMP_DEFAULT)
token->implementation = st->implementation;
makeFortranTag (token, variableTagType (st));
readToken (token);
/* we check for both '()' and '[]'
* coarray syntax permits variable(), variable[], or variable()[]
*/
if (isType (token, TOKEN_PAREN_OPEN))
skipOverParens (token);
if (isType (token, TOKEN_SQUARE_OPEN))
skipOverSqaures (token);
if (isType (token, TOKEN_OPERATOR) &&
strcmp (vStringValue (token->string), "*") == 0)
{
readToken (token); /* read char-length */
if (isType (token, TOKEN_PAREN_OPEN))
skipOverParens (token);
else
readToken (token);
}
if (isType (token, TOKEN_OPERATOR))
{
if (strcmp (vStringValue (token->string), "/") == 0)
{ /* skip over initializations of structure field */
readToken (token);
skipPast (token, TOKEN_OPERATOR);
}
else if (strcmp (vStringValue (token->string), "=") == 0 ||
strcmp (vStringValue (token->string), "=>") == 0)
{
while (! isType (token, TOKEN_COMMA) &&
! isType (token, TOKEN_STATEMENT_END) &&
! isType (token, TOKEN_EOF))
{
readToken (token);
/* another coarray check, for () and [] */
if (isType (token, TOKEN_PAREN_OPEN))
skipOverParens (token);
if (isType (token, TOKEN_SQUARE_OPEN))
skipOverSqaures (token);
}
}
}
/* token left at either comma or statement end */
}
static void parseEntityDeclList (tokenInfo *const token,
tokenInfo *const st)
{
if (isType (token, TOKEN_PERCENT))
skipToNextStatement (token);
else while (isType (token, TOKEN_IDENTIFIER) ||
(isType (token, TOKEN_KEYWORD) &&
!isKeyword (token, KEYWORD_function) &&
!isKeyword (token, KEYWORD_subroutine)))
{
/* compilers accept keywords as identifiers */
if (isType (token, TOKEN_KEYWORD))
token->type = TOKEN_IDENTIFIER;
parseEntityDecl (token, st);
if (isType (token, TOKEN_COMMA))
readToken (token);
else if (isType (token, TOKEN_STATEMENT_END))
{
skipToNextStatement (token);
break;
}
}
}
/* type-declaration-stmt is
* type-spec [[, attr-spec] ... ::] entity-decl-list
*/
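/* e.g. (illustrative): "INTEGER, PARAMETER :: ndim = 3" -- type-spec INTEGER,
 * attr-spec PARAMETER, then the entity-decl-list following "::".
 */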
static void parseTypeDeclarationStmt (tokenInfo *const token)
{
Assert (isTypeSpec (token));
parseTypeSpec (token);
if (!isType (token, TOKEN_STATEMENT_END)) /* if not end of derived type... */
{
if (isType (token, TOKEN_COMMA))
{
tokenInfo* qualifierToken = parseQualifierSpecList (token);
deleteToken (qualifierToken);
}
if (isType (token, TOKEN_DOUBLE_COLON))
readToken (token);
parseEntityDeclList (token, NULL);
}
if (isType (token, TOKEN_STATEMENT_END))
skipToNextStatement (token);
}
/* namelist-stmt is
* NAMELIST /namelist-group-name/ namelist-group-object-list
* [[,]/[namelist-group-name]/ namelist-block-object-list] ...
*
* namelist-group-object is
* variable-name
*
* common-stmt is
* COMMON [/[common-block-name]/] common-block-object-list
* [[,]/[common-block-name]/ common-block-object-list] ...
*
* common-block-object is
* variable-name [ ( explicit-shape-spec-list ) ]
*/
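/* Illustrative statements (identifiers made up):
 *     NAMELIST /grid/ nx, ny, dx
 *     COMMON /coords/ x(100), y(100)
 */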
static void parseCommonNamelistStmt (tokenInfo *const token, tagType type)
{
Assert (isKeyword (token, KEYWORD_common) ||
isKeyword (token, KEYWORD_namelist));
readToken (token);
do
{
if (isType (token, TOKEN_OPERATOR) &&
strcmp (vStringValue (token->string), "/") == 0)
{
readToken (token);
if (isType (token, TOKEN_IDENTIFIER))
{
makeFortranTag (token, type);
readToken (token);
}
skipPast (token, TOKEN_OPERATOR);
}
if (isType (token, TOKEN_IDENTIFIER))
makeFortranTag (token, TAG_LOCAL);
readToken (token);
if (isType (token, TOKEN_PAREN_OPEN))
skipOverParens (token); /* skip explicit-shape-spec-list */
if (isType (token, TOKEN_COMMA))
readToken (token);
} while (! isType (token, TOKEN_STATEMENT_END) &&
! isType (token, TOKEN_EOF));
skipToNextStatement (token);
}
static void parseFieldDefinition (tokenInfo *const token)
{
if (isTypeSpec (token))
parseTypeDeclarationStmt (token);
else if (isKeyword (token, KEYWORD_structure))
parseStructureStmt (token);
else if (isKeyword (token, KEYWORD_union))
parseUnionStmt (token);
else
skipToNextStatement (token);
}
static void parseMap (tokenInfo *const token)
{
Assert (isKeyword (token, KEYWORD_map));
skipToNextStatement (token);
while (! isKeyword (token, KEYWORD_end) &&
! isType (token, TOKEN_EOF))
parseFieldDefinition (token);
readSubToken (token);
/* should be at KEYWORD_map token */
skipToNextStatement (token);
}
/* UNION
* MAP
* [field-definition] [field-definition] ...
* END MAP
* MAP
* [field-definition] [field-definition] ...
* END MAP
* [MAP
* [field-definition]
* [field-definition] ...
* END MAP] ...
* END UNION
* *
*
* Typed data declarations (variables or arrays) in structure declarations
* have the form of normal Fortran typed data declarations. Data items with
* different types can be freely intermixed within a structure declaration.
*
* Unnamed fields can be declared in a structure by specifying the pseudo
* name %FILL in place of an actual field name. You can use this mechanism to
* generate empty space in a record for purposes such as alignment.
*
* All mapped field declarations that are made within a UNION declaration
* share a common location within the containing structure. When initializing
* the fields within a UNION, the final initialization value assigned
* overlays any value previously assigned to a field definition that shares
* that field.
*/
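/* Illustrative example of the extension described above (identifiers made up):
 *     STRUCTURE /packet/
 *         UNION
 *             MAP
 *                 INTEGER*4 whole
 *             END MAP
 *             MAP
 *                 INTEGER*2 lo, hi
 *             END MAP
 *         END UNION
 *     END STRUCTURE
 */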
static void parseUnionStmt (tokenInfo *const token)
{
Assert (isKeyword (token, KEYWORD_union));
skipToNextStatement (token);
while (isKeyword (token, KEYWORD_map))
parseMap (token);
/* should be at KEYWORD_end token */
readSubToken (token);
/* secondary token should be KEYWORD_end token */
skipToNextStatement (token);
}
/* STRUCTURE [/structure-name/] [field-names]
* [field-definition]
* [field-definition] ...
* END STRUCTURE
*
* structure-name
* identifies the structure in a subsequent RECORD statement.
* Substructures can be established within a structure by means of either
* a nested STRUCTURE declaration or a RECORD statement.
*
* field-names
* (for substructure declarations only) one or more names having the
* structure of the substructure being defined.
*
* field-definition
* can be one or more of the following:
*
* Typed data declarations, which can optionally include one or more
* data initialization values.
*
* Substructure declarations (defined by either RECORD statements or
* subsequent STRUCTURE statements).
*
* UNION declarations, which are mapped fields defined by a block of
* statements. The syntax of a UNION declaration is described below.
*
* PARAMETER statements, which do not affect the form of the
* structure.
*/
static void parseStructureStmt (tokenInfo *const token)
{
tokenInfo *name;
Assert (isKeyword (token, KEYWORD_structure));
readToken (token);
if (isType (token, TOKEN_OPERATOR) &&
strcmp (vStringValue (token->string), "/") == 0)
{ /* read structure name */
readToken (token);
if (isType (token, TOKEN_IDENTIFIER))
makeFortranTag (token, TAG_DERIVED_TYPE);
name = newTokenFrom (token);
skipPast (token, TOKEN_OPERATOR);
}
else
{ /* fake out anonymous structure */
name = newToken ();
name->type = TOKEN_IDENTIFIER;
name->tag = TAG_DERIVED_TYPE;
vStringCopyS (name->string, "anonymous");
}
while (isType (token, TOKEN_IDENTIFIER))
{ /* read field names */
makeFortranTag (token, TAG_COMPONENT);
readToken (token);
if (isType (token, TOKEN_COMMA))
readToken (token);
}
skipToNextStatement (token);
ancestorPush (name);
while (! isKeyword (token, KEYWORD_end) &&
! isType (token, TOKEN_EOF))
parseFieldDefinition (token);
readSubToken (token);
/* secondary token should be KEYWORD_structure token */
skipToNextStatement (token);
ancestorPop ();
deleteToken (name);
}
/* specification-stmt
 * is access-stmt (is access-spec [[::] access-id-list])
* or allocatable-stmt (is ALLOCATABLE [::] array-name etc.)
* or common-stmt (is COMMON [ / [common-block-name] /] etc.)
* or data-stmt (is DATA data-stmt-list [[,] data-stmt-set] ...)
* or dimension-stmt (is DIMENSION [::] array-name etc.)
* or equivalence-stmt (is EQUIVALENCE equivalence-set-list)
* or external-stmt (is EXTERNAL etc.)
* or intent-stmt (is INTENT ( intent-spec ) [::] etc.)
* or intrinsic-stmt (is INTRINSIC etc.)
* or namelist-stmt (is NAMELIST / namelist-group-name / etc.)
* or optional-stmt (is OPTIONAL [::] etc.)
* or pointer-stmt (is POINTER [::] object-name etc.)
* or save-stmt (is SAVE etc.)
* or target-stmt (is TARGET [::] object-name etc.)
*
* access-spec is PUBLIC or PRIVATE
*/
static bool parseSpecificationStmt (tokenInfo *const token)
{
bool result = true;
switch (token->keyword)
{
case KEYWORD_common:
parseCommonNamelistStmt (token, TAG_COMMON_BLOCK);
break;
case KEYWORD_namelist:
parseCommonNamelistStmt (token, TAG_NAMELIST);
break;
case KEYWORD_structure:
parseStructureStmt (token);
break;
case KEYWORD_allocatable:
case KEYWORD_data:
case KEYWORD_dimension:
case KEYWORD_equivalence:
case KEYWORD_external:
case KEYWORD_intent:
case KEYWORD_intrinsic:
case KEYWORD_optional:
case KEYWORD_pointer:
case KEYWORD_private:
case KEYWORD_public:
case KEYWORD_save:
case KEYWORD_target:
skipToNextStatement (token);
break;
default:
result = false;
break;
}
return result;
}
/* Type bound generic procedure is:
* GENERIC [, access-spec ] :: generic-spec => binding-name1 [, binding-name2]...
* access-spec: PUBLIC or PRIVATE
* generic-spec: 1. generic name; 2. OPERATOR(op); 3. ASSIGNMENT(=)
* binding-name: type bound procedure
*/
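/* For illustration, hypothetical type bound generic bindings might look
 * like this (names are invented):
 *
 *     GENERIC, PUBLIC :: area => area_circle, area_rectangle
 *     GENERIC :: OPERATOR(+) => add_points
 */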
static void parseGenericMethod (tokenInfo *const token)
{
if (isKeyword (token, KEYWORD_assignment) ||
isKeyword (token, KEYWORD_operator))
{
readToken (token);
if (isType (token, TOKEN_PAREN_OPEN))
readToken (token);
if (isType (token, TOKEN_OPERATOR))
makeFortranTag (token, TAG_METHOD);
}
else
{
if (isType (token, TOKEN_KEYWORD))
token->type = TOKEN_IDENTIFIER;
makeFortranTag (token, TAG_METHOD);
}
skipToNextStatement (token);
}
/* component-def-stmt is
* type-spec [[, component-attr-spec-list] ::] component-decl-list
*
* component-decl is
* component-name [ ( component-array-spec ) ] [ * char-length ]
*/
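/* For illustration, hypothetical component definitions might look like
 * this (names are invented):
 *
 *     REAL, DIMENSION(3) :: coords
 *     CHARACTER(LEN=8) :: label
 */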
static void parseComponentDefStmt (tokenInfo *const token)
{
tokenInfo* st = newToken ();
tokenInfo* qt = NULL;
bool isGeneric = false;
Assert (isTypeSpec (token));
if (isKeyword (token, KEYWORD_procedure) ||
isKeyword (token, KEYWORD_final) ||
isKeyword (token, KEYWORD_generic))
st->isMethod = true;
if (isKeyword (token, KEYWORD_generic))
isGeneric = true;
parseTypeSpec (token);
if (isType (token, TOKEN_COMMA))
{
qt = parseQualifierSpecList (token);
if (qt->implementation != IMP_DEFAULT)
st->implementation = qt->implementation;
deleteToken (qt);
}
if (isType (token, TOKEN_DOUBLE_COLON))
readToken (token);
if (isGeneric)
parseGenericMethod (token);
else
parseEntityDeclList (token, st);
deleteToken (st);
}
/* derived-type-def is
* derived-type-stmt is (TYPE [[, access-spec] ::] type-name
* [private-sequence-stmt] ... (is PRIVATE or SEQUENCE)
* component-def-stmt
* [component-def-stmt] ...
* end-type-stmt
*/
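/* For illustration, a hypothetical derived type definition might look
 * like this (names are invented):
 *
 *     TYPE, PUBLIC :: point
 *       REAL :: x, y
 *     END TYPE point
 */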
static void parseDerivedTypeDef (tokenInfo *const token)
{
tokenInfo *qualifierToken = NULL;
if (isType (token, TOKEN_COMMA))
qualifierToken = parseQualifierSpecList (token);
if (isType (token, TOKEN_DOUBLE_COLON))
readToken (token);
if (isType (token, TOKEN_IDENTIFIER))
{
if (qualifierToken)
{
if (qualifierToken->parentType)
token->parentType = vStringNewCopy (qualifierToken->parentType);
if (qualifierToken->implementation != IMP_DEFAULT)
token->implementation = qualifierToken->implementation;
}
makeFortranTag (token, TAG_DERIVED_TYPE);
}
deleteToken (qualifierToken);
ancestorPush (token);
skipToNextStatement (token);
if (isKeyword (token, KEYWORD_private) ||
isKeyword (token, KEYWORD_sequence))
{
skipToNextStatement (token);
}
while (! isKeyword (token, KEYWORD_end) &&
! isType (token, TOKEN_EOF))
{
if (isTypeSpec (token))
parseComponentDefStmt (token);
else
skipToNextStatement (token);
}
readSubToken (token);
/* secondary token should be KEYWORD_type token */
skipToToken (token, TOKEN_STATEMENT_END);
ancestorPop ();
}
/* interface-block
* interface-stmt (is INTERFACE [generic-spec])
* [interface-body]
* [module-procedure-stmt] ...
* end-interface-stmt (is END INTERFACE)
*
* generic-spec
* is generic-name
* or OPERATOR ( defined-operator )
* or ASSIGNMENT ( = )
*
* interface-body
* is function-stmt
* [specification-part]
* end-function-stmt
* or subroutine-stmt
* [specification-part]
* end-subroutine-stmt
*
* module-procedure-stmt is
* MODULE PROCEDURE procedure-name-list
*/
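/* For illustration, a hypothetical generic interface block might look
 * like this (names are invented):
 *
 *     INTERFACE swap
 *       MODULE PROCEDURE swap_int, swap_real
 *     END INTERFACE
 */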
static void parseInterfaceBlock (tokenInfo *const token)
{
tokenInfo *name = NULL;
Assert (isKeyword (token, KEYWORD_interface));
readToken (token);
if (isType (token, TOKEN_IDENTIFIER))
{
makeFortranTag (token, TAG_INTERFACE);
name = newTokenFrom (token);
}
else if (isKeyword (token, KEYWORD_assignment) ||
isKeyword (token, KEYWORD_operator))
{
readToken (token);
if (isType (token, TOKEN_PAREN_OPEN))
readToken (token);
if (isType (token, TOKEN_OPERATOR))
{
makeFortranTag (token, TAG_INTERFACE);
name = newTokenFrom (token);
}
}
if (name == NULL)
{
name = newToken ();
name->type = TOKEN_IDENTIFIER;
name->tag = TAG_INTERFACE;
}
ancestorPush (name);
while (! isKeyword (token, KEYWORD_end) &&
! isType (token, TOKEN_EOF))
{
switch (token->keyword)
{
case KEYWORD_function:
case KEYWORD_subroutine: parseSubprogram (token); break;
default:
if (isSubprogramPrefix (token))
readToken (token);
else if (isTypeSpec (token))
parseTypeSpec (token);
else
skipToNextStatement (token);
break;
}
}
readSubToken (token);
/* secondary token should be KEYWORD_interface token */
skipToNextStatement (token);
ancestorPop ();
deleteToken (name);
}
/* enum-block
* enum-stmt (is ENUM, BIND(C) [ :: type-alias-name ]
* or ENUM [ kind-selector ] [ :: ] [ type-alias-name ])
* [ enum-body (is ENUMERATOR [ :: ] enumerator-list) ]
* end-enum-stmt (is END ENUM)
*/
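/* For illustration, a hypothetical enum block might look like this
 * (names are invented):
 *
 *     ENUM, BIND(C)
 *       ENUMERATOR :: red = 1, green, blue
 *     END ENUM
 */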
static void parseEnumBlock (tokenInfo *const token)
{
tokenInfo *name = NULL;
Assert (isKeyword (token, KEYWORD_enum));
readToken (token);
if (isType (token, TOKEN_COMMA))
{
readToken (token);
if (isType (token, TOKEN_KEYWORD))
readToken (token);
if (isType (token, TOKEN_PAREN_OPEN))
skipOverParens (token);
}
parseKindSelector (token);
if (isType (token, TOKEN_DOUBLE_COLON))
readToken (token);
if (isType (token, TOKEN_IDENTIFIER))
name = newTokenFrom (token);
if (name == NULL)
{
name = newToken ();
name->type = TOKEN_IDENTIFIER;
name->tag = TAG_ENUM;
}
else
makeFortranTag (name, TAG_ENUM);
skipToNextStatement (token);
ancestorPush (name);
while (! isKeyword (token, KEYWORD_end) &&
! isType(token, TOKEN_EOF))
{
if (isTypeSpec (token))
parseTypeDeclarationStmt (token);
else
skipToNextStatement (token);
}
readSubToken (token);
/* secondary token should be KEYWORD_enum token */
skipToNextStatement (token);
ancestorPop ();
deleteToken (name);
}
/* entry-stmt is
* ENTRY entry-name [ ( dummy-arg-list ) ]
*/
static void parseEntryStmt (tokenInfo *const token)
{
Assert (isKeyword (token, KEYWORD_entry));
readToken (token);
if (isType (token, TOKEN_IDENTIFIER))
makeFortranTag (token, TAG_ENTRY_POINT);
skipToNextStatement (token);
}
/* stmt-function-stmt is
* function-name ([dummy-arg-name-list]) = scalar-expr
*/
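/* For illustration, a hypothetical statement function might look like
 * this (names are invented):
 *
 *     mean(a, b) = (a + b) / 2.0
 */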
static bool parseStmtFunctionStmt (tokenInfo *const token)
{
bool result = false;
Assert (isType (token, TOKEN_IDENTIFIER));
#if 0 /* cannot reliably parse this yet */
makeFortranTag (token, TAG_FUNCTION);
#endif
readToken (token);
if (isType (token, TOKEN_PAREN_OPEN))
{
skipOverParens (token);
result = (bool) (isType (token, TOKEN_OPERATOR) &&
strcmp (vStringValue (token->string), "=") == 0);
}
skipToNextStatement (token);
return result;
}
static bool isIgnoredDeclaration (tokenInfo *const token)
{
bool result;
switch (token->keyword)
{
case KEYWORD_cexternal:
case KEYWORD_cglobal:
case KEYWORD_dllexport:
case KEYWORD_dllimport:
case KEYWORD_external:
case KEYWORD_format:
case KEYWORD_include:
case KEYWORD_inline:
case KEYWORD_parameter:
case KEYWORD_pascal:
case KEYWORD_pexternal:
case KEYWORD_pglobal:
case KEYWORD_static:
case KEYWORD_value:
case KEYWORD_virtual:
case KEYWORD_volatile:
result = true;
break;
default:
result = false;
break;
}
return result;
}
/* declaration-construct
* [derived-type-def]
* [interface-block]
* [type-declaration-stmt]
* [specification-stmt]
* [parameter-stmt] (is PARAMETER ( named-constant-def-list )
* [format-stmt] (is FORMAT format-specification)
* [entry-stmt]
* [stmt-function-stmt]
*/
static bool parseDeclarationConstruct (tokenInfo *const token)
{
bool result = true;
switch (token->keyword)
{
case KEYWORD_entry: parseEntryStmt (token); break;
case KEYWORD_interface: parseInterfaceBlock (token); break;
case KEYWORD_enum: parseEnumBlock (token); break;
case KEYWORD_stdcall: readToken (token); break;
/* derived type handled by parseTypeDeclarationStmt(); */
case KEYWORD_abstract:
readToken (token);
if (isKeyword (token, KEYWORD_interface))
parseInterfaceBlock (token);
else
skipToNextStatement (token);
result = true;
break;
case KEYWORD_automatic:
readToken (token);
if (isTypeSpec (token))
parseTypeDeclarationStmt (token);
else
skipToNextStatement (token);
result = true;
break;
default:
if (isIgnoredDeclaration (token))
skipToNextStatement (token);
else if (isTypeSpec (token))
{
parseTypeDeclarationStmt (token);
result = true;
}
else if (isType (token, TOKEN_IDENTIFIER))
result = parseStmtFunctionStmt (token);
else
result = parseSpecificationStmt (token);
break;
}
return result;
}
/* implicit-part-stmt
* is [implicit-stmt] (is IMPLICIT etc.)
* or [parameter-stmt] (is PARAMETER etc.)
* or [format-stmt] (is FORMAT etc.)
* or [entry-stmt] (is ENTRY entry-name etc.)
*/
static bool parseImplicitPartStmt (tokenInfo *const token)
{
bool result = true;
switch (token->keyword)
{
case KEYWORD_entry: parseEntryStmt (token); break;
case KEYWORD_implicit:
case KEYWORD_include:
case KEYWORD_parameter:
case KEYWORD_format:
skipToNextStatement (token);
break;
default: result = false; break;
}
return result;
}
/* specification-part is
* [use-stmt] ... (is USE module-name etc.)
* [implicit-part] (is [implicit-part-stmt] ... [implicit-stmt])
* [declaration-construct] ...
*/
static bool parseSpecificationPart (tokenInfo *const token)
{
bool result = false;
while (skipStatementIfKeyword (token, KEYWORD_use))
result = true;
while (skipStatementIfKeyword (token, KEYWORD_import))
result = true;
while (parseImplicitPartStmt (token))
result = true;
while (parseDeclarationConstruct (token))
result = true;
return result;
}
/* block-data is
* block-data-stmt (is BLOCK DATA [block-data-name]
* [specification-part]
* end-block-data-stmt (is END [BLOCK DATA [block-data-name]])
*/
static void parseBlockData (tokenInfo *const token)
{
Assert (isKeyword (token, KEYWORD_block));
readToken (token);
if (isKeyword (token, KEYWORD_data))
{
readToken (token);
if (isType (token, TOKEN_IDENTIFIER))
makeFortranTag (token, TAG_BLOCK_DATA);
}
ancestorPush (token);
skipToNextStatement (token);
parseSpecificationPart (token);
while (! isKeyword (token, KEYWORD_end) &&
! isType (token, TOKEN_EOF))
skipToNextStatement (token);
readSubToken (token);
/* secondary token should be KEYWORD_NONE or KEYWORD_block token */
skipToNextStatement (token);
ancestorPop ();
}
/* internal-subprogram-part is
* contains-stmt (is CONTAINS)
* internal-subprogram
* [internal-subprogram] ...
*
* internal-subprogram
* is function-subprogram
* or subroutine-subprogram
*/
static void parseInternalSubprogramPart (tokenInfo *const token)
{
bool done = false;
if (isKeyword (token, KEYWORD_contains))
skipToNextStatement (token);
do
{
switch (token->keyword)
{
case KEYWORD_function:
case KEYWORD_subroutine: parseSubprogram (token); break;
case KEYWORD_end: done = true; break;
default:
if (isSubprogramPrefix (token))
readToken (token);
else if (isTypeSpec (token))
parseTypeSpec (token);
else
readToken (token);
break;
}
} while (! done && ! isType (token, TOKEN_EOF));
}
/* module is
* module-stmt (is MODULE module-name)
* [specification-part]
* [module-subprogram-part]
* end-module-stmt (is END [MODULE [module-name]])
*
* module-subprogram-part
* contains-stmt (is CONTAINS)
* module-subprogram
* [module-subprogram] ...
*
* module-subprogram
* is function-subprogram
* or subroutine-subprogram
*/
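/* For illustration, a hypothetical module of this shape might look like
 * the following (names are invented):
 *
 *     MODULE geometry
 *     CONTAINS
 *       SUBROUTINE normalize (v)
 *         REAL :: v(3)
 *       END SUBROUTINE normalize
 *     END MODULE geometry
 */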
static void parseModule (tokenInfo *const token)
{
Assert (isKeyword (token, KEYWORD_module));
readToken (token);
if (isType (token, TOKEN_IDENTIFIER))
makeFortranTag (token, TAG_MODULE);
ancestorPush (token);
skipToNextStatement (token);
parseSpecificationPart (token);
if (isKeyword (token, KEYWORD_contains))
parseInternalSubprogramPart (token);
while (! isKeyword (token, KEYWORD_end) &&
! isType (token, TOKEN_EOF))
skipToNextStatement (token);
readSubToken (token);
/* secondary token should be KEYWORD_NONE or KEYWORD_module token */
skipToNextStatement (token);
ancestorPop ();
}
/* execution-part
* executable-construct
*
* executable-construct is
* execution-part-construct [execution-part-construct]
*
* execution-part-construct
* is executable-construct
* or format-stmt
* or data-stmt
* or entry-stmt
*/
static bool parseExecutionPart (tokenInfo *const token)
{
bool result = false;
bool done = false;
while (! done && ! isType (token, TOKEN_EOF))
{
switch (token->keyword)
{
default:
if (isSubprogramPrefix (token))
readToken (token);
else
skipToNextStatement (token);
result = true;
break;
case KEYWORD_entry:
parseEntryStmt (token);
result = true;
break;
case KEYWORD_contains:
case KEYWORD_function:
case KEYWORD_subroutine:
done = true;
break;
case KEYWORD_end:
readSubToken (token);
if (isSecondaryKeyword (token, KEYWORD_do) ||
isSecondaryKeyword (token, KEYWORD_enum) ||
isSecondaryKeyword (token, KEYWORD_if) ||
isSecondaryKeyword (token, KEYWORD_select) ||
isSecondaryKeyword (token, KEYWORD_where) ||
isSecondaryKeyword (token, KEYWORD_forall) ||
isSecondaryKeyword (token, KEYWORD_associate) ||
isSecondaryKeyword (token, KEYWORD_block))
{
skipToNextStatement (token);
result = true;
}
else
done = true;
break;
}
}
return result;
}
static void makeSignature (tokenInfo *const token, void* signature)
{
if (isType (token, TOKEN_IDENTIFIER) || isType (token, TOKEN_KEYWORD))
vStringCat ((vString *)signature, token->string);
else if (isType (token, TOKEN_COMMA))
vStringCatS ((vString *)signature, ", ");
}
static vString* parseSignature (tokenInfo *const token)
{
vString* signature = vStringNew ();
readToken (token);
if (isType (token, TOKEN_PAREN_OPEN))
{
vStringPut (signature, '(');
skipOverParensFull (token, makeSignature, signature);
vStringPut (signature, ')');
}
return signature;
}
static void parseSubprogramFull (tokenInfo *const token, const tagType tag)
{
Assert (isKeyword (token, KEYWORD_program) ||
isKeyword (token, KEYWORD_function) ||
isKeyword (token, KEYWORD_subroutine));
readToken (token);
if (isType (token, TOKEN_IDENTIFIER))
{
tokenInfo* name = newTokenFrom (token);
if (tag == TAG_SUBROUTINE ||
tag == TAG_PROTOTYPE)
name->signature = parseSignature (token);
makeFortranTag (name, tag);
ancestorPush (name);
deleteToken (name);
}
else
ancestorPush (token);
skipToNextStatement (token);
parseSpecificationPart (token);
parseExecutionPart (token);
if (isKeyword (token, KEYWORD_contains))
parseInternalSubprogramPart (token);
/* should be at KEYWORD_end token */
readSubToken (token);
/* secondary token should be one of KEYWORD_NONE, KEYWORD_program,
 * KEYWORD_function, KEYWORD_subroutine
*/
skipToNextStatement (token);
ancestorPop ();
}
static tagType subprogramTagType (tokenInfo *const token)
{
tagType result = TAG_UNDEFINED;
if (insideInterface ())
result = TAG_PROTOTYPE;
else if (isKeyword (token, KEYWORD_subroutine))
result = TAG_SUBROUTINE;
else if (isKeyword (token, KEYWORD_function))
result = TAG_FUNCTION;
Assert (result != TAG_UNDEFINED);
return result;
}
/* function-subprogram is
* function-stmt (is [prefix] FUNCTION function-name etc.)
* [specification-part]
* [execution-part]
* [internal-subprogram-part]
* end-function-stmt (is END [FUNCTION [function-name]])
*
* prefix
* is type-spec [RECURSIVE]
* or [RECURSIVE] type-spec
*/
/* subroutine-subprogram is
* subroutine-stmt (is [RECURSIVE] SUBROUTINE subroutine-name etc.)
* [specification-part]
* [execution-part]
* [internal-subprogram-part]
 * end-subroutine-stmt (is END [SUBROUTINE [subroutine-name]])
*/
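/* For illustration, a hypothetical recursive subroutine of this shape
 * might look like the following (names are invented):
 *
 *     RECURSIVE SUBROUTINE walk (node)
 *       INTEGER node
 *     END SUBROUTINE walk
 */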
static void parseSubprogram (tokenInfo *const token)
{
parseSubprogramFull (token, subprogramTagType (token));
}
/* main-program is
* [program-stmt] (is PROGRAM program-name)
* [specification-part]
* [execution-part]
* [internal-subprogram-part ]
* end-program-stmt
*/
static void parseMainProgram (tokenInfo *const token)
{
parseSubprogramFull (token, TAG_PROGRAM);
}
/* program-unit
* is main-program
* or external-subprogram (is function-subprogram or subroutine-subprogram)
* or module
* or block-data
*/
static void parseProgramUnit (tokenInfo *const token)
{
readToken (token);
do
{
if (isType (token, TOKEN_STATEMENT_END))
readToken (token);
else switch (token->keyword)
{
case KEYWORD_block: parseBlockData (token); break;
case KEYWORD_end: skipToNextStatement (token); break;
case KEYWORD_function:
case KEYWORD_subroutine: parseSubprogram (token); break;
case KEYWORD_module: parseModule (token); break;
case KEYWORD_program: parseMainProgram (token); break;
default:
if (isSubprogramPrefix (token))
readToken (token);
else
{
bool one = parseSpecificationPart (token);
bool two = parseExecutionPart (token);
if (! (one || two))
readToken (token);
}
break;
}
} while (! isType (token, TOKEN_EOF));
}
static rescanReason findFortranTags (const unsigned int passCount)
{
tokenInfo *token;
rescanReason rescan;
Assert (passCount < 3);
token = newToken ();
FreeSourceForm = (bool) (passCount > 1);
Column = 0;
parseProgramUnit (token);
if (FreeSourceFormFound && ! FreeSourceForm)
{
verbose ("%s: not fixed source form; retry as free source form\n",
getInputFileName ());
rescan = RESCAN_FAILED;
}
else
{
rescan = RESCAN_NONE;
}
ancestorClear ();
deleteToken (token);
return rescan;
}
static void initialize (const langType language)
{
Lang_fortran = language;
}
extern parserDefinition* FortranParser (void)
{
static const char *const extensions [] = {
"f", "for", "ftn", "f77", "f90", "f95", "f03", "f08", "f15",
#ifndef CASE_INSENSITIVE_FILENAMES
"F", "FOR", "FTN", "F77", "F90", "F95", "F03", "F08", "F15",
#endif
NULL
};
parserDefinition* def = parserNew ("Fortran");
def->kinds = FortranKinds;
def->kindCount = ARRAY_SIZE (FortranKinds);
def->extensions = extensions;
def->parser2 = findFortranTags;
def->initialize = initialize;
def->keywordTable = FortranKeywordTable;
def->keywordCount = ARRAY_SIZE (FortranKeywordTable);
return def;
}
| 1 | 14,426 | This leads to a small "leak" (mostly theoretical only though), if we don't want it this could be created in `initialize()` and destroyed in `finalize()`. | universal-ctags-ctags | c |
@@ -25,6 +25,8 @@ import (
)
const (
+ // BuildIDOSEnv is the os env name to get build id
+ BuildIDOSEnv = "BUILD_ID"
translateFailedPrefix = "TranslateFailed"
privacyInfoReplacement = "[Privacy Info]"
) | 1 | // Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"fmt"
"reflect"
"regexp"
"strings"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy"
"google.golang.org/api/compute/v1"
)
const (
translateFailedPrefix = "TranslateFailed"
privacyInfoReplacement = "[Privacy Info]"
)
var (
osChoices = map[string]string{
"debian-8": "debian/translate_debian_8.wf.json",
"debian-9": "debian/translate_debian_9.wf.json",
"centos-6": "enterprise_linux/translate_centos_6.wf.json",
"centos-7": "enterprise_linux/translate_centos_7.wf.json",
"rhel-6": "enterprise_linux/translate_rhel_6_licensed.wf.json",
"rhel-6-byol": "enterprise_linux/translate_rhel_6_byol.wf.json",
"rhel-7": "enterprise_linux/translate_rhel_7_licensed.wf.json",
"rhel-7-byol": "enterprise_linux/translate_rhel_7_byol.wf.json",
"ubuntu-1404": "ubuntu/translate_ubuntu_1404.wf.json",
"ubuntu-1604": "ubuntu/translate_ubuntu_1604.wf.json",
"windows-2008r2": "windows/translate_windows_2008_r2.wf.json",
"windows-2008r2-byol": "windows/translate_windows_2008_r2_byol.wf.json",
"windows-2012": "windows/translate_windows_2012.wf.json",
"windows-2012-byol": "windows/translate_windows_2012_byol.wf.json",
"windows-2012r2": "windows/translate_windows_2012_r2.wf.json",
"windows-2012r2-byol": "windows/translate_windows_2012_r2_byol.wf.json",
"windows-2016": "windows/translate_windows_2016.wf.json",
"windows-2016-byol": "windows/translate_windows_2016_byol.wf.json",
"windows-2019": "windows/translate_windows_2019.wf.json",
"windows-2019-byol": "windows/translate_windows_2019_byol.wf.json",
"windows-7-byol": "windows/translate_windows_7_byol.wf.json",
"windows-8-1-x64-byol": "windows/translate_windows_8-1_x64_byol.wf.json",
"windows-10-byol": "windows/translate_windows_10_byol.wf.json",
}
privacyRegex = regexp.MustCompile(`\[Privacy\->.*?<\-Privacy\]`)
)
// ValidateOS validates that osID is supported by Daisy image import
func ValidateOS(osID string) error {
if osID == "" {
return daisy.Errf("osID is empty")
}
if _, osValid := osChoices[osID]; !osValid {
// Expose osID and osChoices in the anonymized error message since they are not sensitive values.
errMsg := fmt.Sprintf("os `%v` is invalid. Allowed values: %v", osID, reflect.ValueOf(osChoices).MapKeys())
return daisy.Errf(errMsg)
}
return nil
}
// GetTranslateWorkflowPath returns path to image translate workflow path for given OS
func GetTranslateWorkflowPath(os string) string {
return osChoices[os]
}
// UpdateAllInstanceNoExternalIP updates all Create Instance steps in the workflow to operate
// when no external IP access is allowed by the VPC that the Daisy workflow is running in.
func UpdateAllInstanceNoExternalIP(workflow *daisy.Workflow, noExternalIP bool) {
if !noExternalIP {
return
}
updateAllInstanceAccessConfig(workflow, func() []*compute.AccessConfig {
return []*compute.AccessConfig{}
})
}
func updateAllInstanceAccessConfig(workflow *daisy.Workflow, accessConfigProvider func() []*compute.AccessConfig) {
for _, step := range workflow.Steps {
if step.IncludeWorkflow != nil {
//recurse into included workflow
updateAllInstanceAccessConfig(step.IncludeWorkflow.Workflow, accessConfigProvider)
}
if step.CreateInstances != nil {
for _, instance := range *step.CreateInstances {
if instance.Instance.NetworkInterfaces == nil {
return
}
for _, networkInterface := range instance.Instance.NetworkInterfaces {
networkInterface.AccessConfigs = accessConfigProvider()
}
}
}
}
}
// RemovePrivacyLogInfo removes privacy log information.
func RemovePrivacyLogInfo(message string) string {
	// Since translation scripts vary and it is hard to predict their output, we have to hide
	// the details and keep only "TranslateFailed".
if strings.Contains(message, translateFailedPrefix) {
return translateFailedPrefix
}
// All import/export bash scripts enclose privacy info inside "[Privacy-> XXX <-Privacy]". Let's
// remove it for privacy.
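	// For example (illustrative only; the path below is invented),
	// "read [Privacy->/tmp/disk.vmdk<-Privacy]" becomes "read [Privacy Info]".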
message = privacyRegex.ReplaceAllString(message, privacyInfoReplacement)
return message
}
| 1 | 9,405 | BuildIDOSEnvVarName or similar, otherwise it sounds it's actually holding actual value of env var | GoogleCloudPlatform-compute-image-tools | go |
@@ -103,7 +103,7 @@ class SparkReader:
"""
with self.app.app_context():
-
+ current_app.logger.info('Spark consumer has started!')
while True:
self.init_rabbitmq_connection()
self.incoming_ch = utils.create_channel_to_consume( | 1 | import json
import logging
import time
from flask import current_app
import pika
import sqlalchemy
import ujson
from listenbrainz import utils
from listenbrainz.db import stats as db_stats
from listenbrainz.db import user as db_user
from listenbrainz.db.exceptions import DatabaseException
from listenbrainz.spark.handlers import (handle_candidate_sets,
handle_dataframes,
handle_dump_imported, handle_model,
handle_recommendations,
handle_user_daily_activity,
handle_user_entity,
handle_user_listening_activity,
handle_sitewide_entity,
notify_artist_relation_import,
notify_mapping_import,
handle_missing_musicbrainz_data,
notify_cf_recording_recommendations_generation)
from listenbrainz.webserver import create_app
response_handler_map = {
'user_entity': handle_user_entity,
'user_listening_activity': handle_user_listening_activity,
'user_daily_activity': handle_user_daily_activity,
'sitewide_entity': handle_sitewide_entity,
'import_full_dump': handle_dump_imported,
'import_incremental_dump': handle_dump_imported,
'cf_recommendations_recording_dataframes': handle_dataframes,
'cf_recommendations_recording_model': handle_model,
'cf_recommendations_recording_candidate_sets': handle_candidate_sets,
'cf_recommendations_recording_recommendations': handle_recommendations,
'import_mapping': notify_mapping_import,
'import_artist_relation': notify_artist_relation_import,
'missing_musicbrainz_data': handle_missing_musicbrainz_data,
'cf_recommendations_recording_mail': notify_cf_recording_recommendations_generation
}
RABBITMQ_HEARTBEAT_TIME = 60 * 60 # 1 hour, in seconds
class SparkReader:
def __init__(self):
self.app = create_app() # creating a flask app for config values and logging to Sentry
def get_response_handler(self, response_type):
return response_handler_map[response_type]
def init_rabbitmq_connection(self):
""" Initializes the connection to RabbitMQ.
Note: this is a blocking function which keeps retrying if it fails
to connect to RabbitMQ
"""
self.connection = utils.connect_to_rabbitmq(
username=current_app.config['RABBITMQ_USERNAME'],
password=current_app.config['RABBITMQ_PASSWORD'],
host=current_app.config['RABBITMQ_HOST'],
port=current_app.config['RABBITMQ_PORT'],
virtual_host=current_app.config['RABBITMQ_VHOST'],
error_logger=current_app.logger.error,
heartbeat=RABBITMQ_HEARTBEAT_TIME,
)
def process_response(self, response):
try:
response_type = response['type']
except KeyError:
current_app.logger.error('Bad response sent to spark_reader: %s', json.dumps(response, indent=4), exc_info=True)
return
try:
response_handler = self.get_response_handler(response_type)
except Exception:
current_app.logger.error('Unknown response type: %s, doing nothing.', response_type, exc_info=True)
return
try:
response_handler(response)
except Exception as e:
current_app.logger.error('Error in the response handler: %s, data: %s',
str(e), json.dumps(response, indent=4), exc_info=True)
return
def callback(self, ch, method, properties, body):
""" Handle the data received from the queue and
insert into the database accordingly.
"""
current_app.logger.debug("Received a message, processing...")
response = ujson.loads(body)
self.process_response(response)
current_app.logger.debug("Done!")
def start(self):
""" initiates RabbitMQ connection and starts consuming from the queue
"""
with self.app.app_context():
while True:
self.init_rabbitmq_connection()
self.incoming_ch = utils.create_channel_to_consume(
connection=self.connection,
exchange=current_app.config['SPARK_RESULT_EXCHANGE'],
queue=current_app.config['SPARK_RESULT_QUEUE'],
callback_function=self.callback,
no_ack=True,
)
self.incoming_ch.basic_qos(prefetch_count=1)
current_app.logger.info('Spark consumer started!')
try:
self.incoming_ch.start_consuming()
except pika.exceptions.ConnectionClosed:
self.connection = None
continue
self.connection.close()
if __name__ == '__main__':
sr = SparkReader()
sr.start()
| 1 | 18,227 | I like this standard "container has started" message. Should we have a "container exited because FOO" message as well? | metabrainz-listenbrainz-server | py |
@@ -19,6 +19,8 @@ package org.openqa.grid.web;
import com.google.common.collect.Maps;
+import com.sun.org.glassfish.gmbal.ManagedObject;
+
import org.openqa.grid.internal.Registry;
import org.openqa.grid.internal.utils.GridHubConfiguration;
import org.openqa.grid.web.servlet.DisplayHelpServlet; | 1 | /*
Copyright 2011 Selenium committers
Copyright 2011 Software Freedom Conservancy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.grid.web;
import com.google.common.collect.Maps;
import org.openqa.grid.internal.Registry;
import org.openqa.grid.internal.utils.GridHubConfiguration;
import org.openqa.grid.web.servlet.DisplayHelpServlet;
import org.openqa.grid.web.servlet.DriverServlet;
import org.openqa.grid.web.servlet.Grid1HeartbeatServlet;
import org.openqa.grid.web.servlet.HubStatusServlet;
import org.openqa.grid.web.servlet.LifecycleServlet;
import org.openqa.grid.web.servlet.ProxyStatusServlet;
import org.openqa.grid.web.servlet.RegistrationServlet;
import org.openqa.grid.web.servlet.ResourceServlet;
import org.openqa.grid.web.servlet.TestSessionStatusServlet;
import org.openqa.grid.web.servlet.beta.ConsoleServlet;
import org.openqa.grid.web.utils.ExtraServletUtil;
import org.openqa.selenium.net.NetworkUtils;
import org.seleniumhq.jetty7.server.Server;
import org.seleniumhq.jetty7.server.bio.SocketConnector;
import org.seleniumhq.jetty7.servlet.ServletContextHandler;
import org.seleniumhq.jetty7.util.thread.QueuedThreadPool;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Map;
import java.util.logging.Logger;
import javax.servlet.Servlet;
/**
* Jetty server. Main entry point for everything about the grid. <p/> Except for unit tests, this
* should be a singleton.
*/
public class Hub {
private static final Logger log = Logger.getLogger(Hub.class.getName());
private final int port;
private final String host;
private final int maxThread;
private final boolean isHostRestricted;
private final Registry registry;
private final Map<String, Class<? extends Servlet>> extraServlet = Maps.newHashMap();
private Server server;
private void addServlet(String key, Class<? extends Servlet> s) {
extraServlet.put(key, s);
}
/**
* get the registry backing up the hub state.
*
* @return The registry
*/
public Registry getRegistry() {
return registry;
}
public Hub(GridHubConfiguration config) {
registry = Registry.newInstance(this, config);
maxThread = config.getJettyMaxThreads();
if (config.getHost() != null) {
host = config.getHost();
isHostRestricted = true;
} else {
NetworkUtils utils = new NetworkUtils();
host = utils.getIp4NonLoopbackAddressOfThisMachine().getHostAddress();
isHostRestricted = false;
}
this.port = config.getPort();
for (String s : config.getServlets()) {
Class<? extends Servlet> servletClass = ExtraServletUtil.createServlet(s);
if (servletClass != null) {
String path = "/grid/admin/" + servletClass.getSimpleName() + "/*";
log.info("binding " + servletClass.getCanonicalName() + " to " + path);
addServlet(path, servletClass);
}
}
initServer();
}
private void initServer() {
try {
server = new Server();
SocketConnector socketListener = new SocketConnector();
socketListener.setMaxIdleTime(60000);
if (isHostRestricted) {
socketListener.setHost(host);
}
socketListener.setPort(port);
socketListener.setLowResourcesMaxIdleTime(6000);
server.addConnector(socketListener);
ServletContextHandler root = new ServletContextHandler(ServletContextHandler.SESSIONS);
root.setContextPath("/");
server.setHandler(root);
root.setAttribute(Registry.KEY, registry);
root.addServlet(DisplayHelpServlet.class.getName(), "/*");
root.addServlet(ConsoleServlet.class.getName(), "/grid/console/*");
root.addServlet(ConsoleServlet.class.getName(), "/grid/beta/console/*");
root.addServlet(org.openqa.grid.web.servlet.ConsoleServlet.class.getName(), "/grid/old/console/*");
root.addServlet(RegistrationServlet.class.getName(), "/grid/register/*");
// TODO remove at some point. Here for backward compatibility of
// tests etc.
root.addServlet(DriverServlet.class.getName(), "/grid/driver/*");
root.addServlet(DriverServlet.class.getName(), "/wd/hub/*");
root.addServlet(DriverServlet.class.getName(), "/selenium-server/driver/*");
root.addServlet(ResourceServlet.class.getName(), "/grid/resources/*");
root.addServlet(ProxyStatusServlet.class.getName(), "/grid/api/proxy/*");
root.addServlet(HubStatusServlet.class.getName(), "/grid/api/hub/*");
root.addServlet(TestSessionStatusServlet.class.getName(), "/grid/api/testsession/*");
root.addServlet(LifecycleServlet.class.getName(), "/lifecycle-manager/*");
// Selenium Grid 1.0 compatibility routes for older nodes trying to
// work with the newer hub.
root.addServlet(RegistrationServlet.class.getName(), "/registration-manager/register/*");
root.addServlet(Grid1HeartbeatServlet.class.getName(), "/heartbeat");
// Load any additional servlets provided by the user.
for (Map.Entry<String, Class<? extends Servlet>> entry : extraServlet.entrySet()) {
root.addServlet(entry.getValue().getName(), entry.getKey());
}
} catch (Throwable e) {
      throw new RuntimeException("Error initializing the hub: " + e.getMessage(), e);
}
}
public int getPort() {
return port;
}
public String getHost() {
return host;
}
public void start() throws Exception {
initServer();
if (maxThread>0){
QueuedThreadPool pool = new QueuedThreadPool();
pool.setMaxThreads(maxThread);
server.setThreadPool(pool);
}
server.start();
}
public void stop() throws Exception {
server.stop();
}
public URL getUrl() {
try {
return new URL("http://" + getHost() + ":" + getPort());
} catch (MalformedURLException e) {
throw new RuntimeException(e.getMessage());
}
}
public URL getRegistrationURL() {
String uri = "http://" + getHost() + ":" + getPort() + "/grid/register/";
try {
return new URL(uri);
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
}
| 1 | 11,533 | And again. The reason it's bad is that if someone uses a JDK not produced by Oracle they won't have this class. | SeleniumHQ-selenium | py |
@@ -0,0 +1,19 @@
+const { useHandsontable } = require('./use-handsontable');
+const { getDependencies } = require('./dependencies');
+const { register } = require('./register');
+
+if (module) {
+ module.exports = {
+ useHandsontable,
+ instanceRegister: register,
+ getDependencies,
+ applyToWindow: () => {
+ /* eslint-disable no-restricted-globals */
+ if ((typeof window !== 'undefined')) {
+ window.useHandsontable = useHandsontable;
+ window.instanceRegister = register;
+ }
+ /* eslint-enable no-restricted-globals */
+ }
+ };
+} | 1 | 1 | 18,106 | We have Vue application at our disposal, and have examples container implemented as component. Shouldn't helpers be imported instead being global? Not the best practice in Vue app I guess | handsontable-handsontable | js |
|
@@ -1221,3 +1221,14 @@ type NoMergedMDError struct {
func (e NoMergedMDError) Error() string {
return fmt.Sprintf("No MD yet for TLF %s", e.tlf)
}
+
+// DiskCacheClosedError indicates that the disk cache has been
+// closed, and thus isn't accepting any more operations.
+type DiskCacheClosedError struct {
+ op string
+}
+
+// Error implements the error interface for DiskCacheClosedError.
+func (e DiskCacheClosedError) Error() string {
+ return fmt.Sprintf("Error performing %s operation: the disk cache is closed.", e.op)
+} | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/tlf"
)
// ErrorFile is the name of the virtual file in KBFS that should
// contain the last reported error(s).
var ErrorFile = ".kbfs_error"
// WrapError simply wraps an error in a fmt.Stringer interface, so
// that it can be reported.
type WrapError struct {
Err error
}
// String implements the fmt.Stringer interface for WrapError
func (e WrapError) String() string {
return e.Err.Error()
}
// NameExistsError indicates that the user tried to create an entry
// for a name that already existed in a subdirectory.
type NameExistsError struct {
Name string
}
// Error implements the error interface for NameExistsError
func (e NameExistsError) Error() string {
return fmt.Sprintf("%s already exists", e.Name)
}
// NoSuchNameError indicates that the user tried to access a
// subdirectory entry that doesn't exist.
type NoSuchNameError struct {
Name string
}
// Error implements the error interface for NoSuchNameError
func (e NoSuchNameError) Error() string {
return fmt.Sprintf("%s doesn't exist", e.Name)
}
// NoSuchUserError indicates that the given user couldn't be resolved.
type NoSuchUserError struct {
Input string
}
// Error implements the error interface for NoSuchUserError
func (e NoSuchUserError) Error() string {
return fmt.Sprintf("%s is not a Keybase user", e.Input)
}
// ToStatus implements the keybase1.ToStatusAble interface for NoSuchUserError
func (e NoSuchUserError) ToStatus() keybase1.Status {
return keybase1.Status{
Name: "NotFound",
Code: int(keybase1.StatusCode_SCNotFound),
Desc: e.Error(),
}
}
// BadTLFNameError indicates a top-level folder name that has an
// incorrect format.
type BadTLFNameError struct {
Name string
}
// Error implements the error interface for BadTLFNameError.
func (e BadTLFNameError) Error() string {
return fmt.Sprintf("TLF name %s is in an incorrect format", e.Name)
}
// InvalidBlockRefError indicates an invalid block reference was
// encountered.
type InvalidBlockRefError struct {
ref BlockRef
}
func (e InvalidBlockRefError) Error() string {
return fmt.Sprintf("Invalid block ref %s", e.ref)
}
// InvalidPathError indicates an invalid path was encountered.
type InvalidPathError struct {
p path
}
// Error implements the error interface for InvalidPathError.
func (e InvalidPathError) Error() string {
return fmt.Sprintf("Invalid path %s", e.p.DebugString())
}
// InvalidParentPathError indicates a path without a valid parent was
// encountered.
type InvalidParentPathError struct {
p path
}
// Error implements the error interface for InvalidParentPathError.
func (e InvalidParentPathError) Error() string {
return fmt.Sprintf("Path with invalid parent %s", e.p.DebugString())
}
// DirNotEmptyError indicates that the user tried to unlink a
// subdirectory that was not empty.
type DirNotEmptyError struct {
Name string
}
// Error implements the error interface for DirNotEmptyError
func (e DirNotEmptyError) Error() string {
return fmt.Sprintf("Directory %s is not empty and can't be removed", e.Name)
}
// TlfAccessError indicates that the user tried to perform an unpermitted
// operation on a top-level folder.
type TlfAccessError struct {
ID tlf.ID
}
// Error implements the error interface for TlfAccessError
func (e TlfAccessError) Error() string {
return fmt.Sprintf("Operation not permitted on folder %s", e.ID)
}
// RenameAcrossDirsError indicates that the user tried to do an atomic
// rename across directories.
type RenameAcrossDirsError struct {
}
// Error implements the error interface for RenameAcrossDirsError
func (e RenameAcrossDirsError) Error() string {
return fmt.Sprintf("Cannot rename across directories")
}
// ErrorFileAccessError indicates that the user tried to perform an
// operation on the ErrorFile that is not allowed.
type ErrorFileAccessError struct {
}
// Error implements the error interface for ErrorFileAccessError
func (e ErrorFileAccessError) Error() string {
return fmt.Sprintf("Operation not allowed on file %s", ErrorFile)
}
// ReadAccessError indicates that the user tried to read from a
// top-level folder without read permission.
type ReadAccessError struct {
User libkb.NormalizedUsername
Filename string
Tlf CanonicalTlfName
Public bool
}
// Error implements the error interface for ReadAccessError
func (e ReadAccessError) Error() string {
return fmt.Sprintf("%s does not have read access to directory %s",
e.User, buildCanonicalPathForTlfName(e.Public, e.Tlf))
}
// WriteAccessError indicates an error when trying to write a file
type WriteAccessError struct {
User libkb.NormalizedUsername
Filename string
Tlf CanonicalTlfName
Public bool
}
// Error implements the error interface for WriteAccessError
func (e WriteAccessError) Error() string {
if e.Tlf != "" {
return fmt.Sprintf("%s does not have write access to directory %s",
e.User, buildCanonicalPathForTlfName(e.Public, e.Tlf))
}
return fmt.Sprintf("%s does not have write access to %s", e.User, e.Filename)
}
// WriteUnsupportedError indicates an error when trying to write a file
type WriteUnsupportedError struct {
Filename string
}
// Error implements the error interface for WriteAccessError
func (e WriteUnsupportedError) Error() string {
return fmt.Sprintf("Writing to %s is unsupported", e.Filename)
}
// NewReadAccessError constructs a ReadAccessError for the given
// directory and user.
func NewReadAccessError(h *TlfHandle, username libkb.NormalizedUsername, filename string) error {
tlfname := h.GetCanonicalName()
return ReadAccessError{
User: username,
Filename: filename,
Tlf: tlfname,
Public: h.IsPublic(),
}
}
// NewWriteAccessError is an access error trying to write a file
func NewWriteAccessError(h *TlfHandle, username libkb.NormalizedUsername, filename string) error {
tlf := CanonicalTlfName("")
public := false
if h != nil {
tlf = h.GetCanonicalName()
public = h.IsPublic()
}
return WriteAccessError{
User: username,
Filename: filename,
Tlf: tlf,
Public: public,
}
}
// NewWriteUnsupportedError returns unsupported error trying to write a file
func NewWriteUnsupportedError(filename string) error {
return WriteUnsupportedError{
Filename: filename,
}
}
// NeedSelfRekeyError indicates that the folder in question needs to
// be rekeyed for the local device, and can be done so by one of the
// other user's devices.
type NeedSelfRekeyError struct {
Tlf CanonicalTlfName
Err error
}
// Error implements the error interface for NeedSelfRekeyError
func (e NeedSelfRekeyError) Error() string {
return fmt.Sprintf("This device does not yet have read access to "+
"directory %s, log into Keybase from one of your other "+
"devices to grant access: %+v", buildCanonicalPathForTlfName(false, e.Tlf), e.Err)
}
// ToStatus exports error to status
func (e NeedSelfRekeyError) ToStatus() keybase1.Status {
kv := keybase1.StringKVPair{
Key: "Tlf",
Value: string(e.Tlf),
}
return keybase1.Status{
Code: int(keybase1.StatusCode_SCNeedSelfRekey),
Name: "SC_NEED_SELF_REKEY",
Desc: e.Error(),
Fields: []keybase1.StringKVPair{kv},
}
}
// NeedOtherRekeyError indicates that the folder in question needs to
// be rekeyed for the local device, and can only done so by one of the
// other users.
type NeedOtherRekeyError struct {
Tlf CanonicalTlfName
Err error
}
// Error implements the error interface for NeedOtherRekeyError
func (e NeedOtherRekeyError) Error() string {
return fmt.Sprintf("This device does not yet have read access to "+
"directory %s, ask one of the other directory participants to "+
"log into Keybase to grant you access automatically: %+v",
buildCanonicalPathForTlfName(false, e.Tlf), e.Err)
}
// ToStatus exports error to status
func (e NeedOtherRekeyError) ToStatus() keybase1.Status {
kv := keybase1.StringKVPair{
Key: "Tlf",
Value: string(e.Tlf),
}
return keybase1.Status{
Code: int(keybase1.StatusCode_SCNeedOtherRekey),
Name: "SC_NEED_OTHER_REKEY",
Desc: e.Error(),
Fields: []keybase1.StringKVPair{kv},
}
}
// NotFileBlockError indicates that a file block was expected but a
// block of a different type was found.
//
// ptr and branch should be filled in, but p may be empty.
type NotFileBlockError struct {
ptr BlockPointer
branch BranchName
p path
}
func (e NotFileBlockError) Error() string {
return fmt.Sprintf("The block at %s is not a file block (branch=%s, path=%s)", e.ptr, e.branch, e.p)
}
// NotDirBlockError indicates that a dir block was expected but a
// block of a different type was found.
//
// ptr and branch should be filled in, but p may be empty.
type NotDirBlockError struct {
ptr BlockPointer
branch BranchName
p path
}
func (e NotDirBlockError) Error() string {
return fmt.Sprintf("The block at %s is not a dir block (branch=%s, path=%s)", e.ptr, e.branch, e.p)
}
// NotFileError indicates that the user tried to perform a
// file-specific operation on something that isn't a file.
type NotFileError struct {
path path
}
// Error implements the error interface for NotFileError
func (e NotFileError) Error() string {
return fmt.Sprintf("%s is not a file (folder %s)", e.path, e.path.Tlf)
}
// NotDirError indicates that the user tried to perform a
// dir-specific operation on something that isn't a directory.
type NotDirError struct {
path path
}
// Error implements the error interface for NotDirError
func (e NotDirError) Error() string {
return fmt.Sprintf("%s is not a directory (folder %s)", e.path, e.path.Tlf)
}
// BlockDecodeError indicates that a block couldn't be decoded as
// expected; probably it is the wrong type.
type BlockDecodeError struct {
decodeErr error
}
// Error implements the error interface for BlockDecodeError
func (e BlockDecodeError) Error() string {
return fmt.Sprintf("Decode error for a block: %v", e.decodeErr)
}
// BadDataError indicates that KBFS is storing corrupt data for a block.
type BadDataError struct {
ID kbfsblock.ID
}
// Error implements the error interface for BadDataError
func (e BadDataError) Error() string {
return fmt.Sprintf("Bad data for block %v", e.ID)
}
// NoSuchBlockError indicates that a block for the associated ID doesn't exist.
type NoSuchBlockError struct {
ID kbfsblock.ID
}
// Error implements the error interface for NoSuchBlockError
func (e NoSuchBlockError) Error() string {
return fmt.Sprintf("Couldn't get block %v", e.ID)
}
// BadCryptoError indicates that KBFS performed a bad crypto operation.
type BadCryptoError struct {
ID kbfsblock.ID
}
// Error implements the error interface for BadCryptoError
func (e BadCryptoError) Error() string {
return fmt.Sprintf("Bad crypto for block %v", e.ID)
}
// BadCryptoMDError indicates that KBFS performed a bad crypto
// operation, specifically on a MD object.
type BadCryptoMDError struct {
ID tlf.ID
}
// Error implements the error interface for BadCryptoMDError
func (e BadCryptoMDError) Error() string {
return fmt.Sprintf("Bad crypto for the metadata of directory %v", e.ID)
}
// BadMDError indicates that the system is storing corrupt MD object
// for the given TLF ID.
type BadMDError struct {
ID tlf.ID
}
// Error implements the error interface for BadMDError
func (e BadMDError) Error() string {
return fmt.Sprintf("Wrong format for metadata for directory %v", e.ID)
}
// MDMissingDataError indicates that we are trying to get the
// metadata ID of a MD object with no serialized data field.
type MDMissingDataError struct {
ID tlf.ID
}
// Error implements the error interface for MDMissingDataError
func (e MDMissingDataError) Error() string {
return fmt.Sprintf("No serialized private data in the metadata "+
"for directory %v", e.ID)
}
// MDMismatchError indicates an inconsistent or unverifiable MD object
// for the given top-level folder.
type MDMismatchError struct {
Revision MetadataRevision
Dir string
TlfID tlf.ID
Err error
}
// Error implements the error interface for MDMismatchError
func (e MDMismatchError) Error() string {
return fmt.Sprintf("Could not verify metadata (revision=%d) for directory %s (id=%s): %s",
e.Revision, e.Dir, e.TlfID, e.Err)
}
// NoSuchMDError indicates that there is no MD object for the given
// folder, revision, and merged status.
type NoSuchMDError struct {
Tlf tlf.ID
Rev MetadataRevision
BID BranchID
}
// Error implements the error interface for NoSuchMDError
func (e NoSuchMDError) Error() string {
return fmt.Sprintf("Couldn't get metadata for folder %v, revision %d, "+
"%s", e.Tlf, e.Rev, e.BID)
}
// InvalidMetadataVersionError indicates that an invalid metadata version was
// used.
type InvalidMetadataVersionError struct {
Tlf tlf.ID
MetadataVer MetadataVer
}
// Error implements the error interface for InvalidMetadataVersionError.
func (e InvalidMetadataVersionError) Error() string {
return fmt.Sprintf("Invalid metadata version %d for folder %s",
int(e.MetadataVer), e.Tlf)
}
// NewMetadataVersionError indicates that the metadata for the given
// folder has been written using a new metadata version that our
// client doesn't understand.
type NewMetadataVersionError struct {
Tlf tlf.ID
MetadataVer MetadataVer
}
// Error implements the error interface for NewMetadataVersionError.
func (e NewMetadataVersionError) Error() string {
return fmt.Sprintf(
"The metadata for folder %s is of a version (%d) that we can't read",
e.Tlf, e.MetadataVer)
}
// InvalidDataVersionError indicates that an invalid data version was
// used.
type InvalidDataVersionError struct {
DataVer DataVer
}
// Error implements the error interface for InvalidDataVersionError.
func (e InvalidDataVersionError) Error() string {
return fmt.Sprintf("Invalid data version %d", int(e.DataVer))
}
// NewDataVersionError indicates that the data at the given path has
// been written using a new data version that our client doesn't
// understand.
type NewDataVersionError struct {
path path
DataVer DataVer
}
// Error implements the error interface for NewDataVersionError.
func (e NewDataVersionError) Error() string {
return fmt.Sprintf(
"The data at path %s is of a version (%d) that we can't read "+
"(in folder %s)",
e.path, e.DataVer, e.path.Tlf)
}
// OutdatedVersionError indicates that we have encountered some new
// data version we don't understand, and the user should be prompted
// to upgrade.
type OutdatedVersionError struct {
}
// Error implements the error interface for OutdatedVersionError.
func (e OutdatedVersionError) Error() string {
return "Your software is out of date, and cannot read this data. " +
"Please use `keybase update check` to upgrade your software."
}
// InvalidKeyGenerationError indicates that an invalid key generation
// was used.
type InvalidKeyGenerationError struct {
TlfID tlf.ID
KeyGen KeyGen
}
// Error implements the error interface for InvalidKeyGenerationError.
func (e InvalidKeyGenerationError) Error() string {
return fmt.Sprintf("Invalid key generation %d for %s", int(e.KeyGen), e.TlfID)
}
// NewKeyGenerationError indicates that the data at the given path has
// been written using keys that our client doesn't have.
type NewKeyGenerationError struct {
TlfID tlf.ID
KeyGen KeyGen
}
// Error implements the error interface for NewKeyGenerationError.
func (e NewKeyGenerationError) Error() string {
return fmt.Sprintf(
"The data for %v is keyed with a key generation (%d) that "+
"we don't know", e.TlfID, e.KeyGen)
}
// BadSplitError indicates that the BlockSplitter has an error.
type BadSplitError struct {
}
// Error implements the error interface for BadSplitError
func (e BadSplitError) Error() string {
return "Unexpected bad block split"
}
// TooLowByteCountError indicates that size of a block is smaller than
// the expected size.
type TooLowByteCountError struct {
ExpectedMinByteCount int
ByteCount int
}
// Error implements the error interface for TooLowByteCountError
func (e TooLowByteCountError) Error() string {
return fmt.Sprintf("Expected at least %d bytes, got %d bytes",
e.ExpectedMinByteCount, e.ByteCount)
}
// InconsistentEncodedSizeError is raised when a dirty block has a
// non-zero encoded size.
type InconsistentEncodedSizeError struct {
info BlockInfo
}
// Error implements the error interface for InconsistentEncodedSizeError
func (e InconsistentEncodedSizeError) Error() string {
return fmt.Sprintf("Block pointer to dirty block %v with non-zero "+
"encoded size = %d bytes", e.info.ID, e.info.EncodedSize)
}
// MDWriteNeededInRequest indicates that the system needs MD write
// permissions to successfully complete an operation, so it should
// retry in mdWrite mode.
type MDWriteNeededInRequest struct {
}
// Error implements the error interface for MDWriteNeededInRequest
func (e MDWriteNeededInRequest) Error() string {
return "This request needs MD write access, but doesn't have it."
}
// KeyNotFoundError indicates that a key matching the given KID
// couldn't be found.
type KeyNotFoundError struct {
kid keybase1.KID
}
// Error implements the error interface for KeyNotFoundError.
func (e KeyNotFoundError) Error() string {
return fmt.Sprintf("Could not find key with kid=%s", e.kid)
}
// UnverifiableTlfUpdateError indicates that a MD update could not be
// verified.
type UnverifiableTlfUpdateError struct {
Tlf string
User libkb.NormalizedUsername
Err error
}
// Error implements the error interface for UnverifiableTlfUpdateError.
func (e UnverifiableTlfUpdateError) Error() string {
return fmt.Sprintf("%s was last written by an unknown device claiming "+
"to belong to user %s. The device has possibly been revoked by the "+
"user. Use `keybase log send` to file an issue with the Keybase "+
"admins.", e.Tlf, e.User)
}
// KeyCacheMissError indicates that a key matching the given TLF ID
// and key generation wasn't found in cache.
type KeyCacheMissError struct {
tlf tlf.ID
keyGen KeyGen
}
// Error implements the error interface for KeyCacheMissError.
func (e KeyCacheMissError) Error() string {
return fmt.Sprintf("Could not find key with tlf=%s, keyGen=%d", e.tlf, e.keyGen)
}
// KeyCacheHitError indicates that a key matching the given TLF ID
// and key generation was found in cache but the object type was unknown.
type KeyCacheHitError struct {
tlf tlf.ID
keyGen KeyGen
}
// Error implements the error interface for KeyCacheHitError.
func (e KeyCacheHitError) Error() string {
return fmt.Sprintf("Invalid key with tlf=%s, keyGen=%d", e.tlf, e.keyGen)
}
// UnknownEncryptionVer indicates that we can't decrypt an
// encryptedData object because it has an unknown version.
type UnknownEncryptionVer struct {
ver EncryptionVer
}
// Error implements the error interface for UnknownEncryptionVer.
func (e UnknownEncryptionVer) Error() string {
return fmt.Sprintf("Unknown encryption version %d", int(e.ver))
}
// InvalidNonceError indicates that an invalid cryptographic nonce was
// detected.
type InvalidNonceError struct {
nonce []byte
}
// Error implements the error interface for InvalidNonceError.
func (e InvalidNonceError) Error() string {
return fmt.Sprintf("Invalid nonce %v", e.nonce)
}
// NoKeysError indicates that no keys were provided for a decryption allowing
// multiple device keys
type NoKeysError struct{}
func (e NoKeysError) Error() string {
return "No keys provided"
}
// InvalidPublicTLFOperation indicates that an invalid operation was
// attempted on a public TLF.
type InvalidPublicTLFOperation struct {
id tlf.ID
opName string
ver MetadataVer
}
// Error implements the error interface for InvalidPublicTLFOperation.
func (e InvalidPublicTLFOperation) Error() string {
return fmt.Sprintf("Tried to do invalid operation %s on public TLF %v (ver=%v)",
e.opName, e.id, e.ver)
}
// WrongOpsError indicates that an unexpected path got passed into a
// FolderBranchOps instance
type WrongOpsError struct {
nodeFB FolderBranch
opsFB FolderBranch
}
// Error implements the error interface for WrongOpsError.
func (e WrongOpsError) Error() string {
return fmt.Sprintf("Ops for folder %v, branch %s, was given path %s, "+
"branch %s", e.opsFB.Tlf, e.opsFB.Branch, e.nodeFB.Tlf, e.nodeFB.Branch)
}
// NodeNotFoundError indicates that we tried to find a node for the
// given BlockPointer and failed.
type NodeNotFoundError struct {
ptr BlockPointer
}
// Error implements the error interface for NodeNotFoundError.
func (e NodeNotFoundError) Error() string {
return fmt.Sprintf("No node found for pointer %v", e.ptr)
}
// ParentNodeNotFoundError indicates that we tried to update a Node's
// parent with a BlockPointer that we don't yet know about.
type ParentNodeNotFoundError struct {
parent BlockRef
}
// Error implements the error interface for ParentNodeNotFoundError.
func (e ParentNodeNotFoundError) Error() string {
return fmt.Sprintf("No such parent node found for %v", e.parent)
}
// EmptyNameError indicates that the user tried to use an empty name
// for the given BlockRef.
type EmptyNameError struct {
ref BlockRef
}
// Error implements the error interface for EmptyNameError.
func (e EmptyNameError) Error() string {
return fmt.Sprintf("Cannot use empty name for %v", e.ref)
}
// PaddedBlockReadError occurs if the number of bytes read does not
// equal the number of bytes specified.
type PaddedBlockReadError struct {
ActualLen int
ExpectedLen int
}
// Error implements the error interface of PaddedBlockReadError.
func (e PaddedBlockReadError) Error() string {
return fmt.Sprintf("Reading block data out of padded block resulted in %d bytes, expected %d",
e.ActualLen, e.ExpectedLen)
}
// NotDirectFileBlockError indicates that a direct file block was
// expected, but something else (e.g., an indirect file block) was
// given instead.
type NotDirectFileBlockError struct {
}
func (e NotDirectFileBlockError) Error() string {
	return "Unexpected block type; expected a direct file block"
}
// KeyHalfMismatchError is returned when the key server doesn't return the expected key half.
type KeyHalfMismatchError struct {
Expected TLFCryptKeyServerHalfID
Actual TLFCryptKeyServerHalfID
}
// Error implements the error interface for KeyHalfMismatchError.
func (e KeyHalfMismatchError) Error() string {
return fmt.Sprintf("Key mismatch, expected ID: %s, actual ID: %s",
e.Expected, e.Actual)
}
// InvalidBranchID indicates that the branch ID string is not
// parseable or invalid.
type InvalidBranchID struct {
id string
}
func (e InvalidBranchID) Error() string {
return fmt.Sprintf("Invalid branch ID %q", e.id)
}
// MDServerDisconnected indicates the MDServer has been disconnected for clients waiting
// on an update channel.
type MDServerDisconnected struct {
}
// Error implements the error interface for MDServerDisconnected.
func (e MDServerDisconnected) Error() string {
return "MDServer is disconnected"
}
// MDRevisionMismatch indicates that we tried to apply a revision that
// was not the next in line.
type MDRevisionMismatch struct {
rev MetadataRevision
curr MetadataRevision
}
// Error implements the error interface for MDRevisionMismatch.
func (e MDRevisionMismatch) Error() string {
return fmt.Sprintf("MD revision %d isn't next in line for our "+
"current revision %d", e.rev, e.curr)
}
// MDTlfIDMismatch indicates that the ID field of a successor MD
// doesn't match the ID field of its predecessor.
type MDTlfIDMismatch struct {
currID tlf.ID
nextID tlf.ID
}
func (e MDTlfIDMismatch) Error() string {
return fmt.Sprintf("TLF ID %s doesn't match successor TLF ID %s",
e.currID, e.nextID)
}
// MDPrevRootMismatch indicates that the PrevRoot field of a successor
// MD doesn't match the metadata ID of its predecessor.
type MDPrevRootMismatch struct {
prevRoot MdID
expectedPrevRoot MdID
}
func (e MDPrevRootMismatch) Error() string {
return fmt.Sprintf("PrevRoot %s doesn't match expected %s",
e.prevRoot, e.expectedPrevRoot)
}
// MDDiskUsageMismatch indicates an inconsistency in the DiskUsage
// field of a RootMetadata object.
type MDDiskUsageMismatch struct {
expectedDiskUsage uint64
actualDiskUsage uint64
}
func (e MDDiskUsageMismatch) Error() string {
return fmt.Sprintf("Disk usage %d doesn't match expected %d",
e.actualDiskUsage, e.expectedDiskUsage)
}
// MDUpdateInvertError indicates that we tried to apply a revision that
// was not the next in line.
type MDUpdateInvertError struct {
rev MetadataRevision
curr MetadataRevision
}
// Error implements the error interface for MDUpdateInvertError.
func (e MDUpdateInvertError) Error() string {
return fmt.Sprintf("MD revision %d isn't next in line for our "+
"current revision %d while inverting", e.rev, e.curr)
}
// NotPermittedWhileDirtyError indicates that some operation failed
// because of outstanding dirty files, and may be retried later.
type NotPermittedWhileDirtyError struct {
}
// Error implements the error interface for NotPermittedWhileDirtyError.
func (e NotPermittedWhileDirtyError) Error() string {
return "Not permitted while writes are dirty"
}
// NoChainFoundError indicates that a conflict resolution chain
// corresponding to the given pointer could not be found.
type NoChainFoundError struct {
ptr BlockPointer
}
// Error implements the error interface for NoChainFoundError.
func (e NoChainFoundError) Error() string {
return fmt.Sprintf("No chain found for %v", e.ptr)
}
// DisallowedPrefixError indicates that the user attempted to create
// an entry using a name with a disallowed prefix.
type DisallowedPrefixError struct {
name string
prefix string
}
// Error implements the error interface for DisallowedPrefixError.
func (e DisallowedPrefixError) Error() string {
return fmt.Sprintf("Cannot create %s because it has the prefix %s",
e.name, e.prefix)
}
// FileTooBigError indicates that the user tried to write a file that
// would be bigger than KBFS's supported size.
type FileTooBigError struct {
p path
size int64
maxAllowedBytes uint64
}
// Error implements the error interface for FileTooBigError.
func (e FileTooBigError) Error() string {
return fmt.Sprintf("File %s would have increased to %d bytes, which is "+
"over the supported limit of %d bytes", e.p, e.size, e.maxAllowedBytes)
}
// NameTooLongError indicates that the user tried to write a directory
// entry name that would be bigger than KBFS's supported size.
type NameTooLongError struct {
name string
maxAllowedBytes uint32
}
// Error implements the error interface for NameTooLongError.
func (e NameTooLongError) Error() string {
return fmt.Sprintf("New directory entry name %s has more than the maximum "+
"allowed number of bytes (%d)", e.name, e.maxAllowedBytes)
}
// DirTooBigError indicates that the user tried to write a directory
// that would be bigger than KBFS's supported size.
type DirTooBigError struct {
p path
size uint64
maxAllowedBytes uint64
}
// Error implements the error interface for DirTooBigError.
func (e DirTooBigError) Error() string {
return fmt.Sprintf("Directory %s would have increased to at least %d "+
"bytes, which is over the supported limit of %d bytes", e.p,
e.size, e.maxAllowedBytes)
}
// TlfNameNotCanonical indicates that a name isn't canonical, and
// that another (not necessarily canonical) name should be tried.
type TlfNameNotCanonical struct {
Name, NameToTry string
}
func (e TlfNameNotCanonical) Error() string {
return fmt.Sprintf("TLF name %s isn't canonical: try %s instead",
e.Name, e.NameToTry)
}
// NoCurrentSessionError indicates that the daemon has no current
// session. This is basically a wrapper for session.ErrNoSession,
// needed to give the correct return error code to the OS.
type NoCurrentSessionError struct {
}
// Error implements the error interface for NoCurrentSessionError.
func (e NoCurrentSessionError) Error() string {
return "You are not logged into Keybase. Try `keybase login`."
}
// NoCurrentSessionExpectedError is the error text that will get
// converted into a NoCurrentSessionError.
var NoCurrentSessionExpectedError = "no current session"
// RekeyPermissionError indicates that the user tried to rekey a
// top-level folder in a manner inconsistent with their permissions.
type RekeyPermissionError struct {
User libkb.NormalizedUsername
Dir string
}
// Error implements the error interface for RekeyPermissionError
func (e RekeyPermissionError) Error() string {
return fmt.Sprintf("%s is trying to rekey directory %s in a manner "+
"inconsistent with their role", e.User, e.Dir)
}
// NewRekeyPermissionError constructs a RekeyPermissionError for the given
// directory and user.
func NewRekeyPermissionError(
dir *TlfHandle, username libkb.NormalizedUsername) error {
dirname := dir.GetCanonicalPath()
return RekeyPermissionError{username, dirname}
}
// RekeyIncompleteError is returned when a rekey is partially done but
// needs a writer to finish it.
type RekeyIncompleteError struct{}
func (e RekeyIncompleteError) Error() string {
	return "Rekey did not complete due to insufficient user permissions"
}
// TimeoutError is just a replacement for context.DeadlineExceeded
// with a more friendly error string.
type TimeoutError struct {
}
func (e TimeoutError) Error() string {
return "Operation timed out"
}
// InvalidOpError is returned when an operation is called that isn't supported
// by the current implementation.
type InvalidOpError struct {
op string
}
func (e InvalidOpError) Error() string {
return fmt.Sprintf("Invalid operation: %s", e.op)
}
// CRAbandonStagedBranchError indicates that conflict resolution had to
// abandon a staged branch due to an unresolvable error.
type CRAbandonStagedBranchError struct {
Err error
Bid BranchID
}
func (e CRAbandonStagedBranchError) Error() string {
return fmt.Sprintf("Abandoning staged branch %s due to an error: %v",
e.Bid, e.Err)
}
// NoSuchFolderListError indicates that the user tried to access a
// subdirectory of /keybase that doesn't exist.
type NoSuchFolderListError struct {
Name string
PrivName string
PubName string
}
// Error implements the error interface for NoSuchFolderListError
func (e NoSuchFolderListError) Error() string {
return fmt.Sprintf("/keybase/%s is not a Keybase folder. "+
"All folders begin with /keybase/%s or /keybase/%s.",
e.Name, e.PrivName, e.PubName)
}
// UnexpectedUnmergedPutError indicates that we tried to do an
// unmerged put when that was disallowed.
type UnexpectedUnmergedPutError struct {
}
// Error implements the error interface for UnexpectedUnmergedPutError
func (e UnexpectedUnmergedPutError) Error() string {
return "Unmerged puts are not allowed"
}
// NoSuchTlfHandleError indicates we were unable to resolve a folder
// ID to a folder handle.
type NoSuchTlfHandleError struct {
ID tlf.ID
}
// Error implements the error interface for NoSuchTlfHandleError
func (e NoSuchTlfHandleError) Error() string {
return fmt.Sprintf("Folder handle for %s not found", e.ID)
}
// MetadataIsFinalError indicates that we tried to make or set a
// successor to a finalized folder.
type MetadataIsFinalError struct {
}
// Error implements the error interface for MetadataIsFinalError.
func (e MetadataIsFinalError) Error() string {
return "Metadata is final"
}
// IncompatibleHandleError indicates that something tried to update
// the head of a TLF with a RootMetadata with an incompatible handle.
type IncompatibleHandleError struct {
oldName CanonicalTlfName
partiallyResolvedOldName CanonicalTlfName
newName CanonicalTlfName
}
func (e IncompatibleHandleError) Error() string {
return fmt.Sprintf(
"old head %q resolves to %q instead of new head %q",
e.oldName, e.partiallyResolvedOldName, e.newName)
}
// ShutdownHappenedError indicates that shutdown has happened.
type ShutdownHappenedError struct {
}
// Error implements the error interface for ShutdownHappenedError.
func (e ShutdownHappenedError) Error() string {
return "Shutdown happened"
}
// UnmergedError indicates that fbo is on an unmerged local revision
type UnmergedError struct {
}
// Error implements the error interface for UnmergedError.
func (e UnmergedError) Error() string {
return "fbo is on an unmerged local revision"
}
// ExclOnUnmergedError happens when an operation with O_EXCL set is attempted
// while fbo is on an unmerged local revision.
type ExclOnUnmergedError struct {
}
// Error implements the error interface for ExclOnUnmergedError.
func (e ExclOnUnmergedError) Error() string {
return "an operation with O_EXCL set is called but fbo is on an unmerged local version"
}
// OverQuotaWarning indicates that the user is over their quota, and
// is being slowed down by the server.
type OverQuotaWarning struct {
UsageBytes int64
LimitBytes int64
}
// Error implements the error interface for OverQuotaWarning.
func (w OverQuotaWarning) Error() string {
return fmt.Sprintf("You are using %d bytes, and your plan limits you "+
"to %d bytes. Please delete some data.", w.UsageBytes, w.LimitBytes)
}
// OpsCantHandleFavorite means that folderBranchOps wasn't able to
// deal with a favorites request.
type OpsCantHandleFavorite struct {
Msg string
}
// Error implements the error interface for OpsCantHandleFavorite.
func (e OpsCantHandleFavorite) Error() string {
return fmt.Sprintf("Couldn't handle the favorite operation: %s", e.Msg)
}
// TlfHandleFinalizedError is returned when something attempts to modify
// a finalized TLF handle.
type TlfHandleFinalizedError struct {
}
// Error implements the error interface for TlfHandleFinalizedError.
func (e TlfHandleFinalizedError) Error() string {
return "Attempt to modify finalized TLF handle"
}
// NoSigChainError means that a user we were trying to identify does
// not have a sigchain.
type NoSigChainError struct {
User libkb.NormalizedUsername
}
// Error implements the error interface for NoSigChainError.
func (e NoSigChainError) Error() string {
return fmt.Sprintf("%s has not yet installed Keybase and set up the "+
"Keybase filesystem. Please ask them to.", e.User)
}
// RekeyConflictError indicates a conflict happened while trying to rekey.
type RekeyConflictError struct {
Err error
}
// Error implements the error interface for RekeyConflictError.
func (e RekeyConflictError) Error() string {
return fmt.Sprintf("Conflict during a rekey, not retrying: %v", e.Err)
}
// UnmergedSelfConflictError indicates that we hit a conflict on the
// unmerged branch, so a previous MD PutUnmerged we thought had
// failed, had actually succeeded.
type UnmergedSelfConflictError struct {
Err error
}
// Error implements the error interface for UnmergedSelfConflictError.
func (e UnmergedSelfConflictError) Error() string {
return fmt.Sprintf("Unmerged self conflict: %v", e.Err)
}
// MutableBareRootMetadataNoImplError is returned when an interface expected
// to implement MutableBareRootMetadata does not do so.
type MutableBareRootMetadataNoImplError struct {
}
// Error implements the error interface for MutableBareRootMetadataNoImplError
func (e MutableBareRootMetadataNoImplError) Error() string {
return "Does not implement MutableBareRootMetadata"
}
// blockNonExistentError is returned when a block doesn't exist. This
// is a generic error, suitable for use by non-server types, whereas
// BServerErrorBlockNonExistent is used only by servers.
type blockNonExistentError struct {
id kbfsblock.ID
}
func (e blockNonExistentError) Error() string {
return fmt.Sprintf("block %s does not exist", e.id)
}
// TLFCryptKeyNotPerDeviceEncrypted is returned when a given TLFCryptKey is not
// encrypted per-device but rather symmetrically encrypted with the current
// generation of the TLFCryptKey.
type TLFCryptKeyNotPerDeviceEncrypted struct {
tlf tlf.ID
keyGen KeyGen
}
// Error implements the error interface for TLFCryptKeyNotPerDeviceEncrypted.
func (e TLFCryptKeyNotPerDeviceEncrypted) Error() string {
return fmt.Sprintf("TLF crypt key for %s at generation %d is not per-device encrypted",
e.tlf, e.keyGen)
}
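// cachePutCacheFullError indicates that a transient block could not be added
// to the block cache because the cache is full.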
type cachePutCacheFullError struct {
ptr BlockPointer
}
func (e cachePutCacheFullError) Error() string {
return fmt.Sprintf("tried and failed to put transient block into the cache because it is full. Pointer: %+v", e.ptr)
}
// FileTooBigForCRError indicates that a file is too big to fit in
// memory, and CR can't handle it.
type FileTooBigForCRError struct {
p path
}
// Error implements the error interface for FileTooBigForCRError.
func (e FileTooBigForCRError) Error() string {
return fmt.Sprintf("Cannot complete CR because the file %s is too big", e.p)
}
// NoMergedMDError indicates that no MDs for this folder have been
// created yet.
type NoMergedMDError struct {
tlf tlf.ID
}
// Error implements the error interface for NoMergedMDError.
func (e NoMergedMDError) Error() string {
return fmt.Sprintf("No MD yet for TLF %s", e.tlf)
}
| 1 | 15,707 | No trailing punctuation in the error message. | keybase-kbfs | go |
@@ -447,5 +447,6 @@ public class Notifier
}
}
clip.start();
+ clip.close();
}
} | 1 | /*
* Copyright (c) 2016-2017, Adam <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client;
import com.google.common.escape.Escaper;
import com.google.common.escape.Escapers;
import com.google.inject.Inject;
import java.awt.Color;
import java.awt.Graphics2D;
import java.awt.Rectangle;
import java.awt.Toolkit;
import java.awt.TrayIcon;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.inject.Singleton;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Clip;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.UnsupportedAudioFileException;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import net.runelite.api.ChatMessageType;
import net.runelite.api.Client;
import net.runelite.api.Constants;
import net.runelite.api.GameState;
import net.runelite.client.chat.ChatColorType;
import net.runelite.client.chat.ChatMessageBuilder;
import net.runelite.client.chat.ChatMessageManager;
import net.runelite.client.chat.QueuedMessage;
import net.runelite.client.config.FlashNotification;
import net.runelite.client.config.RuneLiteConfig;
import net.runelite.client.eventbus.EventBus;
import net.runelite.client.events.NotificationFired;
import net.runelite.client.ui.ClientUI;
import net.runelite.client.util.OSType;
@Singleton
@Slf4j
public class Notifier
{
@Getter
@RequiredArgsConstructor
public enum NativeCustomOff
{
NATIVE("Native"),
CUSTOM("Custom"),
OFF("Off");
private final String name;
@Override
public String toString()
{
return name;
}
}
// Default timeout of notification in milliseconds
private static final int DEFAULT_TIMEOUT = 10000;
private static final String DOUBLE_QUOTE = "\"";
private static final Escaper SHELL_ESCAPE = Escapers.builder()
.addEscape('"', "'")
.build();
// Notifier properties
private static final Color FLASH_COLOR = new Color(255, 0, 0, 70);
private static final int MINIMUM_FLASH_DURATION_MILLIS = 2000;
private static final int MINIMUM_FLASH_DURATION_TICKS = MINIMUM_FLASH_DURATION_MILLIS / Constants.CLIENT_TICK_LENGTH;
private static final String appName = RuneLiteProperties.getTitle();
private final Client client;
private final RuneLiteConfig runeLiteConfig;
private final ClientUI clientUI;
private final ScheduledExecutorService executorService;
private final ChatMessageManager chatMessageManager;
private final EventBus eventBus;
private final Path notifyIconPath;
private final boolean terminalNotifierAvailable;
private Instant flashStart;
private long mouseLastPressedMillis;
@Inject
private Notifier(
final ClientUI clientUI,
final Client client,
final RuneLiteConfig runeliteConfig,
final ScheduledExecutorService executorService,
final ChatMessageManager chatMessageManager,
final EventBus eventBus)
{
this.client = client;
this.clientUI = clientUI;
this.runeLiteConfig = runeliteConfig;
this.executorService = executorService;
this.chatMessageManager = chatMessageManager;
this.eventBus = eventBus;
this.notifyIconPath = RuneLite.RUNELITE_DIR.toPath().resolve("icon.png");
		// Assume the terminal-notifier helper is available (used for macOS notifications)
this.terminalNotifierAvailable = true;
storeIcon();
}
public void notify(String message)
{
notify(message, TrayIcon.MessageType.NONE);
}
public void notify(String message, TrayIcon.MessageType type)
{
eventBus.post(NotificationFired.class, new NotificationFired(message, type));
if (!runeLiteConfig.sendNotificationsWhenFocused() && clientUI.isFocused())
{
return;
}
if (runeLiteConfig.requestFocusOnNotification())
{
clientUI.requestFocus();
}
if (runeLiteConfig.enableTrayNotifications())
{
sendNotification(appName, message, type);
}
switch (runeLiteConfig.notificationSound())
{
case NATIVE:
Toolkit.getDefaultToolkit().beep();
break;
case CUSTOM:
executorService.submit(this::playCustomSound);
}
if (runeLiteConfig.enableGameMessageNotification() && client.getGameState() == GameState.LOGGED_IN)
{
final String formattedMessage = new ChatMessageBuilder()
.append(ChatColorType.HIGHLIGHT)
.append(message)
.build();
chatMessageManager.queue(QueuedMessage.builder()
.type(ChatMessageType.CONSOLE)
.name(appName)
.runeLiteFormattedMessage(formattedMessage)
.build());
}
if (runeLiteConfig.flashNotification() != FlashNotification.DISABLED)
{
flashStart = Instant.now();
mouseLastPressedMillis = client.getMouseLastPressedMillis();
}
log.debug(message);
}
public void processFlash(final Graphics2D graphics)
{
if (flashStart == null || client.getGameState() != GameState.LOGGED_IN)
{
flashStart = null;
return;
}
FlashNotification flashNotification = runeLiteConfig.flashNotification();
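		// For the FLASH modes below, rendering is skipped for half of each 40-cycle
		// period so the overlay blinks instead of staying solid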
if (client.getGameCycle() % 40 >= 20
// For solid colour, fall through every time.
&& (flashNotification == FlashNotification.FLASH_TWO_SECONDS
|| flashNotification == FlashNotification.FLASH_UNTIL_CANCELLED))
{
return;
}
final Color color = graphics.getColor();
graphics.setColor(FLASH_COLOR);
graphics.fill(new Rectangle(client.getCanvas().getSize()));
graphics.setColor(color);
if (!Instant.now().minusMillis(MINIMUM_FLASH_DURATION_MILLIS).isAfter(flashStart))
{
return;
}
switch (flashNotification)
{
case FLASH_TWO_SECONDS:
case SOLID_TWO_SECONDS:
flashStart = null;
break;
case SOLID_UNTIL_CANCELLED:
case FLASH_UNTIL_CANCELLED:
// Any interaction with the client since the notification started will cancel it after the minimum duration
if ((client.getMouseIdleTicks() < MINIMUM_FLASH_DURATION_TICKS
|| client.getKeyboardIdleTicks() < MINIMUM_FLASH_DURATION_TICKS
|| client.getMouseLastPressedMillis() > mouseLastPressedMillis) && clientUI.isFocused())
{
flashStart = null;
}
break;
}
}
private void sendNotification(
final String title,
final String message,
final TrayIcon.MessageType type)
{
final String escapedTitle = SHELL_ESCAPE.escape(title);
final String escapedMessage = SHELL_ESCAPE.escape(message);
switch (OSType.getOSType())
{
case Linux:
sendLinuxNotification(escapedTitle, escapedMessage, type);
break;
case MacOS:
sendMacNotification(escapedTitle, escapedMessage);
break;
default:
sendTrayNotification(title, message, type);
}
}
private void sendTrayNotification(
final String title,
final String message,
final TrayIcon.MessageType type)
{
if (clientUI.getTrayIcon() != null)
{
clientUI.getTrayIcon().displayMessage(title, message, type);
}
}
private void sendLinuxNotification(
final String title,
final String message,
final TrayIcon.MessageType type)
{
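		// Builds a notify-send invocation of the form (illustrative):
		//   notify-send <title> <message> -i <icon path> -u <urgency> -t 10000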
final List<String> commands = new ArrayList<>();
commands.add("notify-send");
commands.add(title);
commands.add(message);
commands.add("-i");
commands.add(SHELL_ESCAPE.escape(notifyIconPath.toAbsolutePath().toString()));
commands.add("-u");
commands.add(toUrgency(type));
commands.add("-t");
commands.add(String.valueOf(DEFAULT_TIMEOUT));
executorService.submit(() ->
{
try
{
Process notificationProcess = sendCommand(commands);
boolean exited = notificationProcess.waitFor(500, TimeUnit.MILLISECONDS);
if (exited && notificationProcess.exitValue() == 0)
{
return;
}
}
catch (IOException | InterruptedException ex)
{
log.debug("error sending notification", ex);
}
// fall back to tray notification
sendTrayNotification(title, message, type);
});
}
private void sendMacNotification(final String title, final String message)
{
final List<String> commands = new ArrayList<>();
if (terminalNotifierAvailable)
{
commands.add("terminal-notifier");
commands.add("-group");
commands.add("net.runelite.launcher");
commands.add("-sender");
commands.add("net.runelite.launcher");
commands.add("-message");
commands.add(DOUBLE_QUOTE + message + DOUBLE_QUOTE);
commands.add("-title");
commands.add(DOUBLE_QUOTE + title + DOUBLE_QUOTE);
}
else
{
commands.add("osascript");
commands.add("-e");
final String script = "display notification " + DOUBLE_QUOTE +
message +
DOUBLE_QUOTE +
" with title " +
DOUBLE_QUOTE +
title +
DOUBLE_QUOTE;
commands.add(script);
}
try
{
sendCommand(commands);
}
catch (IOException ex)
{
log.warn("error sending notification", ex);
}
}
private static Process sendCommand(final List<String> commands) throws IOException
{
return new ProcessBuilder(commands.toArray(new String[0]))
.redirectErrorStream(true)
.start();
}
private void storeIcon()
{
if (OSType.getOSType() == OSType.Linux && !Files.exists(notifyIconPath))
{
try (InputStream stream = Notifier.class.getResourceAsStream("/openosrs.png"))
{
Files.copy(stream, notifyIconPath);
}
catch (IOException ex)
{
log.warn(null, ex);
}
}
}
private boolean isTerminalNotifierAvailable()
{
if (OSType.getOSType() == OSType.MacOS)
{
try
{
final Process exec = Runtime.getRuntime().exec(new String[]{"terminal-notifier", "-help"});
exec.waitFor();
return exec.exitValue() == 0;
}
catch (IOException | InterruptedException e)
{
return false;
}
}
return false;
}
private static String toUrgency(TrayIcon.MessageType type)
{
switch (type)
{
case WARNING:
case ERROR:
return "critical";
default:
return "normal";
}
}
private void playCustomSound()
{
Clip clip = null;
// Try to load the user sound from ~/.runelite/notification.wav
File file = new File(RuneLite.RUNELITE_DIR, "notification.wav");
if (file.exists())
{
try
{
InputStream fileStream = new BufferedInputStream(new FileInputStream(file));
try (AudioInputStream sound = AudioSystem.getAudioInputStream(fileStream))
{
clip = AudioSystem.getClip();
clip.open(sound);
}
}
catch (UnsupportedAudioFileException | IOException | LineUnavailableException e)
{
clip = null;
log.warn("Unable to play notification sound", e);
}
}
if (clip == null)
{
// Otherwise load from the classpath
InputStream fileStream = new BufferedInputStream(Notifier.class.getResourceAsStream("notification.wav"));
try (AudioInputStream sound = AudioSystem.getAudioInputStream(fileStream))
{
clip = AudioSystem.getClip();
clip.open(sound);
}
catch (UnsupportedAudioFileException | IOException | LineUnavailableException e)
{
log.warn("Unable to play builtin notification sound", e);
Toolkit.getDefaultToolkit().beep();
return;
}
}
clip.start();
}
}
| 1 | 16,388 | tested? seems like it could break | open-osrs-runelite | java |
@@ -77,9 +77,10 @@ public final class IndexUpgrader {
public static void main(String[] args) throws IOException {
parseArgs(args).upgrade();
}
-
+
+ /** Parse arguments. */
@SuppressForbidden(reason = "System.out required: command line tool")
- static IndexUpgrader parseArgs(String[] args) throws IOException {
+ public static IndexUpgrader parseArgs(String[] args) throws IOException {
String path = null;
boolean deletePriorCommits = false;
InfoStream out = null; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.CommandLineUtil;
import org.apache.lucene.util.InfoStream;
import org.apache.lucene.util.PrintStreamInfoStream;
import org.apache.lucene.util.SuppressForbidden;
import org.apache.lucene.util.Version;
import java.io.IOException;
import java.nio.file.Paths;
import java.nio.file.Path;
import java.util.Collection;
/**
* This is an easy-to-use tool that upgrades all segments of an index from previous Lucene versions
* to the current segment file format. It can be used from command line:
* <pre>
* java -cp lucene-core.jar org.apache.lucene.index.IndexUpgrader [-delete-prior-commits] [-verbose] indexDir
* </pre>
* Alternatively this class can be instantiated and {@link #upgrade} invoked. It uses {@link UpgradeIndexMergePolicy}
 * and triggers the upgrade via a forceMerge request to {@link IndexWriter}.
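 * <p>For example, a minimal programmatic upgrade might look like the following
 * sketch (the index path is illustrative, not part of this API):
 * <pre>
 *   try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"))) {
 *     new IndexUpgrader(dir, new PrintStreamInfoStream(System.out), false).upgrade();
 *   }
 * </pre>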
* <p>This tool keeps only the last commit in an index; for this
* reason, if the incoming index has more than one commit, the tool
* refuses to run by default. Specify {@code -delete-prior-commits}
* to override this, allowing the tool to delete all but the last commit.
* From Java code this can be enabled by passing {@code true} to
* {@link #IndexUpgrader(Directory,InfoStream,boolean)}.
* <p><b>Warning:</b> This tool may reorder documents if the index was partially
* upgraded before execution (e.g., documents were added). If your application relies
* on "monotonicity" of doc IDs (which means that the order in which the documents
* were added to the index is preserved), do a full forceMerge instead.
* The {@link MergePolicy} set by {@link IndexWriterConfig} may also reorder
* documents.
*/
public final class IndexUpgrader {
private static final String LOG_PREFIX = "IndexUpgrader";
@SuppressForbidden(reason = "System.out required: command line tool")
private static void printUsage() {
System.err.println("Upgrades an index so all segments created with a previous Lucene version are rewritten.");
System.err.println("Usage:");
System.err.println(" java " + IndexUpgrader.class.getName() + " [-delete-prior-commits] [-verbose] [-dir-impl X] indexDir");
System.err.println("This tool keeps only the last commit in an index; for this");
System.err.println("reason, if the incoming index has more than one commit, the tool");
System.err.println("refuses to run by default. Specify -delete-prior-commits to override");
System.err.println("this, allowing the tool to delete all but the last commit.");
System.err.println("Specify a " + FSDirectory.class.getSimpleName() +
" implementation through the -dir-impl option to force its use. If no package is specified the "
+ FSDirectory.class.getPackage().getName() + " package will be used.");
System.err.println("WARNING: This tool may reorder document IDs!");
System.exit(1);
}
  /** Main method to run {@code IndexUpgrader} from the
* command-line. */
@SuppressWarnings("deprecation")
public static void main(String[] args) throws IOException {
parseArgs(args).upgrade();
}
@SuppressForbidden(reason = "System.out required: command line tool")
static IndexUpgrader parseArgs(String[] args) throws IOException {
String path = null;
boolean deletePriorCommits = false;
InfoStream out = null;
String dirImpl = null;
int i = 0;
while (i<args.length) {
String arg = args[i];
if ("-delete-prior-commits".equals(arg)) {
deletePriorCommits = true;
} else if ("-verbose".equals(arg)) {
out = new PrintStreamInfoStream(System.out);
} else if ("-dir-impl".equals(arg)) {
if (i == args.length - 1) {
System.out.println("ERROR: missing value for -dir-impl option");
System.exit(1);
}
i++;
dirImpl = args[i];
} else if (path == null) {
path = arg;
} else {
printUsage();
}
i++;
}
if (path == null) {
printUsage();
}
Path p = Paths.get(path);
Directory dir = null;
if (dirImpl == null) {
dir = FSDirectory.open(p);
} else {
dir = CommandLineUtil.newFSDirectory(dirImpl, p);
}
return new IndexUpgrader(dir, out, deletePriorCommits);
}
private final Directory dir;
private final IndexWriterConfig iwc;
private final boolean deletePriorCommits;
  /** Creates index upgrader on the given directory, using an {@link IndexWriter} with a default
   * {@link IndexWriterConfig}. The tool refuses to upgrade indexes with multiple commit points. */
public IndexUpgrader(Directory dir) {
this(dir, new IndexWriterConfig(null), false);
}
  /** Creates index upgrader on the given directory, using an {@link IndexWriter} with a default
   * {@link IndexWriterConfig}. You can upgrade indexes with multiple commit points by removing
   * all older ones. If {@code infoStream} is not {@code null}, all logging output will be sent to this stream. */
public IndexUpgrader(Directory dir, InfoStream infoStream, boolean deletePriorCommits) {
this(dir, new IndexWriterConfig(null), deletePriorCommits);
if (null != infoStream) {
this.iwc.setInfoStream(infoStream);
}
}
  /** Creates index upgrader on the given directory, using an {@link IndexWriter} with the given
   * config. You can upgrade indexes with multiple commit points by removing
   * all older ones. */
public IndexUpgrader(Directory dir, IndexWriterConfig iwc, boolean deletePriorCommits) {
this.dir = dir;
this.iwc = iwc;
this.deletePriorCommits = deletePriorCommits;
}
/** Perform the upgrade. */
public void upgrade() throws IOException {
if (!DirectoryReader.indexExists(dir)) {
throw new IndexNotFoundException(dir.toString());
}
if (!deletePriorCommits) {
final Collection<IndexCommit> commits = DirectoryReader.listCommits(dir);
if (commits.size() > 1) {
throw new IllegalArgumentException("This tool was invoked to not delete prior commit points, but the following commits were found: " + commits);
}
}
iwc.setMergePolicy(new UpgradeIndexMergePolicy(iwc.getMergePolicy()));
iwc.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
try (final IndexWriter w = new IndexWriter(dir, iwc)) {
InfoStream infoStream = iwc.getInfoStream();
if (infoStream.isEnabled(LOG_PREFIX)) {
infoStream.message(LOG_PREFIX, "Upgrading all pre-" + Version.LATEST + " segments of index directory '" + dir + "' to version " + Version.LATEST + "...");
}
w.forceMerge(1);
if (infoStream.isEnabled(LOG_PREFIX)) {
infoStream.message(LOG_PREFIX, "All segments upgraded to version " + Version.LATEST);
infoStream.message(LOG_PREFIX, "Enforcing commit to rewrite all index metadata...");
}
w.setLiveCommitData(w.getLiveCommitData()); // fake change to enforce a commit (e.g. if index has no segments)
assert w.hasUncommittedChanges();
w.commit();
if (infoStream.isEnabled(LOG_PREFIX)) {
infoStream.message(LOG_PREFIX, "Committed upgraded metadata to index.");
}
}
}
}
| 1 | 37,841 | This have to be public because the renamed o.a.l.backward_index.TestBackwardsCompatibility refers this. | apache-lucene-solr | java |
@@ -286,7 +286,7 @@ func TestApplicationsUpgradeOverGossip(t *testing.T) {
fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes100SecondTestUnupgradedProtocol.json"))
// for the primary node, we want to have a different consensus which always enables applications.
- primaryNodeUnupgradedProtocol := consensus[consensusTestFastUpgrade(protocol.ConsensusFuture)]
+ primaryNodeUnupgradedProtocol := consensus[consensusTestFastUpgrade(protocol.ConsensusCurrentVersion)]
primaryNodeUnupgradedProtocol.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64)
primaryNodeUnupgradedProtocol.ApprovedUpgrades[consensusTestFastUpgrade(protocol.ConsensusFuture)] = 0
consensus[consensusTestUnupgradedProtocol] = primaryNodeUnupgradedProtocol | 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package upgrades
import (
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
)
// consensusTestUnupgradedProtocol is a version of ConsensusCurrentVersion
// that allows the control of the upgrade from consensusTestUnupgradedProtocol to
// test-fast-upgrade-future
const consensusTestUnupgradedProtocol = protocol.ConsensusVersion("test-unupgraded-protocol")
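// makeApplicationUpgradeConsensus builds a consensus configuration in which the
// unupgraded protocol has applications disabled and approves an upgrade to the
// fast-upgrade future protocol, which has applications enabled.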
func makeApplicationUpgradeConsensus(t *testing.T) (appConsensus config.ConsensusProtocols) {
appConsensus = generateFastUpgradeConsensus()
	// make sure that the "current" version does not support applications and that the "future" version *does* support applications.
currentProtocolParams, ok := appConsensus[consensusTestFastUpgrade(protocol.ConsensusCurrentVersion)]
require.True(t, ok)
futureProtocolParams, ok := appConsensus[consensusTestFastUpgrade(protocol.ConsensusFuture)]
require.True(t, ok)
// ensure it's disabled.
currentProtocolParams.Application = false
currentProtocolParams.SupportRekeying = false
// verify that the future protocol supports applications.
require.True(t, futureProtocolParams.Application)
// add an upgrade path from current to future.
currentProtocolParams.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64)
currentProtocolParams.ApprovedUpgrades[consensusTestFastUpgrade(protocol.ConsensusFuture)] = 0
appConsensus[consensusTestUnupgradedProtocol] = currentProtocolParams
appConsensus[consensusTestFastUpgrade(protocol.ConsensusFuture)] = futureProtocolParams
return
}
// TestApplicationsUpgradeOverREST tests that we can safely upgrade from a version that doesn't support applications
// to a version that supports applications. It verifies that prior to supporting applications, the node would not accept
// any application transaction, and that after the upgrade is complete, it would accept them.
func TestApplicationsUpgradeOverREST(t *testing.T) {
smallLambdaMs := 500
consensus := makeApplicationUpgradeConsensus(t)
var fixture fixtures.RestClientFixture
fixture.SetConsensus(consensus)
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes100SecondTestUnupgradedProtocol.json"))
defer fixture.Shutdown()
client := fixture.GetLibGoalClientForNamedNode("Node")
accountList, err := fixture.GetNodeWalletsSortedByBalance(client.DataDir())
require.NoError(t, err)
creator := accountList[0].Address
wh, err := client.GetUnencryptedWalletHandle()
require.NoError(t, err)
user, err := client.GenerateAddress(wh)
require.NoError(t, err)
fee := uint64(1000)
round, err := client.CurrentRound()
require.NoError(t, err)
	// Fund the user account, so it can issue transactions later on
_, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil)
require.NoError(t, err)
client.WaitForRound(round + 2)
// There should be no apps to start with
ad, err := client.AccountData(creator)
require.NoError(t, err)
require.Zero(t, len(ad.AppParams))
ad, err = client.AccountData(user)
require.NoError(t, err)
require.Zero(t, len(ad.AppParams))
require.Equal(t, basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos)
counter := `#pragma version 2
// a simple global and local calls counter app
byte b64 Y291bnRlcg== // counter
dup
app_global_get
int 1
+
app_global_put // update the counter
int 0
int 0
app_opted_in
bnz opted_in
err
opted_in:
int 0 // account idx for app_local_put
byte b64 Y291bnRlcg== // counter
int 0
byte b64 Y291bnRlcg==
app_local_get
int 1 // increment
+
app_local_put
int 1
`
approvalOps, err := logic.AssembleString(counter)
require.NoError(t, err)
clearstateOps, err := logic.AssembleString("#pragma version 2\nint 1")
require.NoError(t, err)
schema := basics.StateSchema{
NumUint: 1,
}
// create the app
tx, err := client.MakeUnsignedAppCreateTx(
transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil,
)
require.NoError(t, err)
tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx)
require.NoError(t, err)
signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx)
require.NoError(t, err)
round, err = client.CurrentRound()
require.NoError(t, err)
_, err = client.BroadcastTransaction(signedTxn)
require.Error(t, err)
require.Contains(t, err.Error(), "application transaction not supported")
curStatus, err := client.Status()
require.NoError(t, err)
initialStatus := curStatus
startLoopTime := time.Now()
// wait until the network upgrade : this can take a while.
for curStatus.LastVersion == initialStatus.LastVersion {
curStatus, err = client.Status()
require.NoError(t, err)
require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond)
round = curStatus.LastRound
}
// now, that we have upgraded to the new protocol which supports applications, try again.
_, err = client.BroadcastTransaction(signedTxn)
require.NoError(t, err)
curStatus, err = client.Status()
require.NoError(t, err)
round = curStatus.LastRound
client.WaitForRound(round + 2)
pendingTx, err := client.GetPendingTransactions(1)
require.NoError(t, err)
require.Equal(t, uint64(0), pendingTx.TotalTxns)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
require.NoError(t, err)
require.Equal(t, 1, len(ad.AppParams))
var appIdx basics.AppIndex
var params basics.AppParams
for i, p := range ad.AppParams {
appIdx = i
params = p
break
}
require.Equal(t, approvalOps.Program, params.ApprovalProgram)
require.Equal(t, clearstateOps.Program, params.ClearStateProgram)
require.Equal(t, schema, params.LocalStateSchema)
require.Equal(t, schema, params.GlobalStateSchema)
require.Equal(t, 1, len(params.GlobalState))
value, ok := params.GlobalState["counter"]
require.True(t, ok)
require.Equal(t, uint64(1), value.Uint)
require.Equal(t, 1, len(ad.AppLocalStates))
state, ok := ad.AppLocalStates[appIdx]
require.True(t, ok)
require.Equal(t, schema, state.Schema)
require.Equal(t, 1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
require.True(t, ok)
require.Equal(t, uint64(1), value.Uint)
// call the app
tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
require.NoError(t, err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
require.NoError(t, err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
require.NoError(t, err)
round, err = client.CurrentRound()
require.NoError(t, err)
_, err = client.BroadcastTransaction(signedTxn)
require.NoError(t, err)
client.WaitForRound(round + 2)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
require.NoError(t, err)
require.Equal(t, 1, len(ad.AppParams))
params, ok = ad.AppParams[appIdx]
require.True(t, ok)
require.Equal(t, approvalOps.Program, params.ApprovalProgram)
require.Equal(t, clearstateOps.Program, params.ClearStateProgram)
require.Equal(t, schema, params.LocalStateSchema)
require.Equal(t, schema, params.GlobalStateSchema)
require.Equal(t, 1, len(params.GlobalState))
value, ok = params.GlobalState["counter"]
require.True(t, ok)
require.Equal(t, uint64(2), value.Uint)
require.Equal(t, 1, len(ad.AppLocalStates))
state, ok = ad.AppLocalStates[appIdx]
require.True(t, ok)
require.Equal(t, schema, state.Schema)
require.Equal(t, 1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
require.True(t, ok)
require.Equal(t, uint64(1), value.Uint)
require.Equal(t, uint64(2), ad.TotalAppSchema.NumUint)
// check user's balance record for the app entry and the state changes
ad, err = client.AccountData(user)
require.NoError(t, err)
require.Equal(t, 0, len(ad.AppParams))
require.Equal(t, 1, len(ad.AppLocalStates))
state, ok = ad.AppLocalStates[appIdx]
require.True(t, ok)
require.Equal(t, schema, state.Schema)
require.Equal(t, 1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
require.True(t, ok)
require.Equal(t, uint64(1), value.Uint)
require.Equal(t, basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos)
app, err := client.ApplicationInformation(uint64(appIdx))
require.NoError(t, err)
require.Equal(t, uint64(appIdx), app.Id)
require.Equal(t, creator, app.Params.Creator)
return
}
// TestApplicationsUpgradeOverGossip tests that we can safely upgrade from a version that doesn't support applications
// to a version that supports applications. It verifies that prior to supporting applications, the node would not accept
// any application transaction, and that after the upgrade is complete, it would accept them.
func TestApplicationsUpgradeOverGossip(t *testing.T) {
smallLambdaMs := 500
consensus := makeApplicationUpgradeConsensus(t)
var fixture fixtures.RestClientFixture
fixture.SetConsensus(consensus)
fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes100SecondTestUnupgradedProtocol.json"))
// for the primary node, we want to have a different consensus which always enables applications.
primaryNodeUnupgradedProtocol := consensus[consensusTestFastUpgrade(protocol.ConsensusFuture)]
primaryNodeUnupgradedProtocol.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64)
primaryNodeUnupgradedProtocol.ApprovedUpgrades[consensusTestFastUpgrade(protocol.ConsensusFuture)] = 0
consensus[consensusTestUnupgradedProtocol] = primaryNodeUnupgradedProtocol
client := fixture.GetLibGoalClientForNamedNode("Primary")
secondary := fixture.GetLibGoalClientForNamedNode("Node")
err := config.SaveConfigurableConsensus(client.DataDir(), consensus)
require.NoError(t, err)
fixture.Start()
defer fixture.Shutdown()
accountList, err := fixture.GetNodeWalletsSortedByBalance(client.DataDir())
require.NoError(t, err)
creator := accountList[0].Address
wh, err := client.GetUnencryptedWalletHandle()
require.NoError(t, err)
user, err := client.GenerateAddress(wh)
require.NoError(t, err)
fee := uint64(1000)
round, err := client.CurrentRound()
require.NoError(t, err)
	// Fund the user account, so it can issue transactions later on
_, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil)
require.NoError(t, err)
client.WaitForRound(round + 2)
round, err = client.CurrentRound()
require.NoError(t, err)
// There should be no apps to start with
ad, err := client.AccountData(creator)
require.NoError(t, err)
require.Zero(t, len(ad.AppParams))
ad, err = client.AccountData(user)
require.NoError(t, err)
require.Zero(t, len(ad.AppParams))
require.Equal(t, basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos)
counter := `#pragma version 2
// a simple global and local calls counter app
byte b64 Y291bnRlcg== // counter
dup
app_global_get
int 1
+
app_global_put // update the counter
int 0
int 0
app_opted_in
bnz opted_in
err
opted_in:
int 0 // account idx for app_local_put
byte b64 Y291bnRlcg== // counter
int 0
byte b64 Y291bnRlcg==
app_local_get
int 1 // increment
+
app_local_put
int 1
`
approvalOps, err := logic.AssembleString(counter)
require.NoError(t, err)
clearstateOps, err := logic.AssembleString("#pragma version 2\nint 1")
require.NoError(t, err)
schema := basics.StateSchema{
NumUint: 1,
}
// create the app
tx, err := client.MakeUnsignedAppCreateTx(
transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil,
)
require.NoError(t, err)
tx, err = client.FillUnsignedTxTemplate(creator, round, round+primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds, fee, tx)
require.NoError(t, err)
signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx)
require.NoError(t, err)
round, err = client.CurrentRound()
require.NoError(t, err)
_, err = client.BroadcastTransaction(signedTxn)
require.NoError(t, err)
	// this transaction is expected to reach the first node (primary), but to be rejected by the second node when transmitted over gossip.
client.WaitForRound(round + 2)
	// check that the primary node still has this transaction in its transaction pool.
pendingTx, err := client.GetPendingTransactions(1)
require.NoError(t, err)
round, err = client.CurrentRound()
require.NoError(t, err)
if round > round+primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds {
t.Skip("Test platform is too slow for this test")
}
require.Equal(t, uint64(1), pendingTx.TotalTxns)
	// check that the secondary node doesn't have that transaction in its transaction pool.
pendingTx, err = secondary.GetPendingTransactions(1)
require.NoError(t, err)
require.Equal(t, uint64(0), pendingTx.TotalTxns)
curStatus, err := client.Status()
require.NoError(t, err)
initialStatus := curStatus
startLoopTime := time.Now()
// wait until the network upgrade : this can take a while.
for curStatus.LastVersion == initialStatus.LastVersion {
curStatus, err = client.Status()
require.NoError(t, err)
require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond)
round = curStatus.LastRound
}
// now, that we have upgraded to the new protocol which supports applications, try again.
tx, err = client.FillUnsignedTxTemplate(creator, round, round+100, fee, tx)
require.NoError(t, err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
require.NoError(t, err)
_, err = client.BroadcastTransaction(signedTxn)
require.NoError(t, err)
curStatus, err = client.Status()
require.NoError(t, err)
round = curStatus.LastRound
client.WaitForRound(round + 2)
pendingTx, err = client.GetPendingTransactions(1)
require.NoError(t, err)
require.Equal(t, uint64(0), pendingTx.TotalTxns)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
require.NoError(t, err)
require.Equal(t, 1, len(ad.AppParams))
var appIdx basics.AppIndex
var params basics.AppParams
for i, p := range ad.AppParams {
appIdx = i
params = p
break
}
require.Equal(t, approvalOps.Program, params.ApprovalProgram)
require.Equal(t, clearstateOps.Program, params.ClearStateProgram)
require.Equal(t, schema, params.LocalStateSchema)
require.Equal(t, schema, params.GlobalStateSchema)
require.Equal(t, 1, len(params.GlobalState))
value, ok := params.GlobalState["counter"]
require.True(t, ok)
require.Equal(t, uint64(1), value.Uint)
require.Equal(t, 1, len(ad.AppLocalStates))
state, ok := ad.AppLocalStates[appIdx]
require.True(t, ok)
require.Equal(t, schema, state.Schema)
require.Equal(t, 1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
require.True(t, ok)
require.Equal(t, uint64(1), value.Uint)
// call the app
tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil)
require.NoError(t, err)
tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx)
require.NoError(t, err)
signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx)
require.NoError(t, err)
round, err = client.CurrentRound()
require.NoError(t, err)
_, err = client.BroadcastTransaction(signedTxn)
require.NoError(t, err)
client.WaitForRound(round + 2)
// check creator's balance record for the app entry and the state changes
ad, err = client.AccountData(creator)
require.NoError(t, err)
require.Equal(t, 1, len(ad.AppParams))
params, ok = ad.AppParams[appIdx]
require.True(t, ok)
require.Equal(t, approvalOps.Program, params.ApprovalProgram)
require.Equal(t, clearstateOps.Program, params.ClearStateProgram)
require.Equal(t, schema, params.LocalStateSchema)
require.Equal(t, schema, params.GlobalStateSchema)
require.Equal(t, 1, len(params.GlobalState))
value, ok = params.GlobalState["counter"]
require.True(t, ok)
require.Equal(t, uint64(2), value.Uint)
require.Equal(t, 1, len(ad.AppLocalStates))
state, ok = ad.AppLocalStates[appIdx]
require.True(t, ok)
require.Equal(t, schema, state.Schema)
require.Equal(t, 1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
require.True(t, ok)
require.Equal(t, uint64(1), value.Uint)
require.Equal(t, uint64(2), ad.TotalAppSchema.NumUint)
// check user's balance record for the app entry and the state changes
ad, err = client.AccountData(user)
require.NoError(t, err)
require.Equal(t, 0, len(ad.AppParams))
require.Equal(t, 1, len(ad.AppLocalStates))
state, ok = ad.AppLocalStates[appIdx]
require.True(t, ok)
require.Equal(t, schema, state.Schema)
require.Equal(t, 1, len(state.KeyValue))
value, ok = state.KeyValue["counter"]
require.True(t, ok)
require.Equal(t, uint64(1), value.Uint)
require.Equal(t, basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos)
app, err := client.ApplicationInformation(uint64(appIdx))
require.NoError(t, err)
require.Equal(t, uint64(appIdx), app.Id)
require.Equal(t, creator, app.Params.Creator)
return
}
| 1 | 41,624 | unrelated to your change, but I don't think that this is correct anymore. We also seen to remove application support from `primaryNodeUnupgradedProtocol` for the test to be correct. | algorand-go-algorand | go |
@@ -440,6 +440,19 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
)
+
+ } else if len(envCfg.WebIdentityTokenFilePath) > 0 {
+ // handles assume role via OIDC token. This should happen before any other
+ // assume role call.
+ sessionName := envCfg.IAMRoleSessionName
+ if len(sessionName) == 0 {
+ sessionName = sharedCfg.AssumeRole.RoleSessionName
+ }
+
+ cfg.Credentials = stscreds.NewWebIdentityCredentials(&Session{
+ Config: cfg,
+ Handlers: handlers.Copy(),
+ }, envCfg.WebIdentityRoleARN, sessionName, envCfg.WebIdentityTokenFilePath)
} else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
cfgCp := *cfg
cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( | 1 | package session
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/csm"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
)
// A Session provides a central location to create service clients from and
// store configurations and request handlers for those services.
//
// Sessions are safe to create service clients concurrently, but it is not safe
// to mutate the Session concurrently.
//
// The Session satisfies the service client's client.ConfigProvider.
type Session struct {
Config *aws.Config
Handlers request.Handlers
}
// New creates a new instance of the handlers merging in the provided configs
// on top of the SDK's default configurations. Once the Session is created it
// can be mutated to modify the Config or Handlers. The Session is safe to be
// read concurrently, but it should not be written to concurrently.
//
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value, the New
// method could now encounter an error when loading the configuration. When
// the environment variable is set, and an error occurs, New will return a
// session that will fail all requests reporting the error that occurred while
// loading the session. Use NewSession to get the error when creating the
// session.
//
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
// the shared config file (~/.aws/config) will also be loaded, in addition to
// the shared credentials file (~/.aws/credentials). Values set in both the
// shared config, and shared credentials will be taken from the shared
// credentials file.
//
// Deprecated: Use NewSession functions to create sessions instead. NewSession
// has the same functionality as New except an error can be returned when the
// func is called instead of waiting to receive an error until a request is made.
func New(cfgs ...*aws.Config) *Session {
// load initial config from environment
envCfg := loadEnvConfig()
if envCfg.EnableSharedConfig {
var cfg aws.Config
cfg.MergeIn(cfgs...)
s, err := NewSessionWithOptions(Options{
Config: cfg,
SharedConfigState: SharedConfigEnable,
})
if err != nil {
// Old session.New expected all errors to be discovered when
// a request is made, and would report the errors then. This
// needs to be replicated if an error occurs while creating
// the session.
msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
"Use session.NewSession to handle errors occurring during session creation."
// Session creation failed, need to report the error and prevent
// any requests from succeeding.
s = &Session{Config: defaults.Config()}
s.Config.MergeIn(cfgs...)
s.Config.Logger.Log("ERROR:", msg, "Error:", err)
s.Handlers.Validate.PushBack(func(r *request.Request) {
r.Error = err
})
}
return s
}
s := deprecatedNewSession(cfgs...)
if envCfg.CSMEnabled {
enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
}
return s
}
// NewSession returns a new Session created from SDK defaults, config files,
// environment, and user provided config files. Once the Session is created
// it can be mutated to modify the Config or Handlers. The Session is safe to
// be read concurrently, but it should not be written to concurrently.
//
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
// the shared config file (~/.aws/config) will also be loaded in addition to
// the shared credentials file (~/.aws/credentials). Values set in both the
// shared config, and shared credentials will be taken from the shared
// credentials file. Enabling the Shared Config will also allow the Session
// to be built with retrieving credentials with AssumeRole set in the config.
//
// See the NewSessionWithOptions func for information on how to override or
// control through code how the Session will be created. Such as specifying the
// config profile, and controlling if shared config is enabled or not.
func NewSession(cfgs ...*aws.Config) (*Session, error) {
opts := Options{}
opts.Config.MergeIn(cfgs...)
return NewSessionWithOptions(opts)
}
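// exampleNewSessionUsage is an illustrative sketch added by the editor and is
// not part of the original source: it shows creating a Session with an
// explicit region via NewSession/Must and deriving a service client
// configuration from it. The region and the "s3" service name are example
// values only.
func exampleNewSessionUsage() client.Config {
	sess := Must(NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	return sess.ClientConfig("s3")
}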
// SharedConfigState provides the ability to optionally override the state
// of the session's creation based on the shared config being enabled or
// disabled.
type SharedConfigState int
const (
// SharedConfigStateFromEnv does not override any state of the
// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
// SharedConfigState type.
SharedConfigStateFromEnv SharedConfigState = iota
// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
// and disables the shared config functionality.
SharedConfigDisable
// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
// and enables the shared config functionality.
SharedConfigEnable
)
// Options provides the means to control how a Session is created and what
// configuration values will be loaded.
//
type Options struct {
// Provides config values for the SDK to use when creating service clients
// and making API requests to services. Any value set in with this field
// will override the associated value provided by the SDK defaults,
// environment or config files where relevant.
//
	// If not set, configuration values from SDK defaults, environment,
// config will be used.
Config aws.Config
// Overrides the config profile the Session should be created from. If not
// set the value of the environment variable will be loaded (AWS_PROFILE,
// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
//
// If not set and environment variables are not set the "default"
// (DefaultSharedConfigProfile) will be used as the profile to load the
// session config from.
Profile string
// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
// environment variable. By default a Session will be created using the
// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
//
// Setting this value to SharedConfigEnable or SharedConfigDisable
// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
// and enable or disable the shared config functionality.
SharedConfigState SharedConfigState
// Ordered list of files the session will load configuration from.
	// It will override the environment variables AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE.
SharedConfigFiles []string
// When the SDK's shared config is configured to assume a role with MFA
// this option is required in order to provide the mechanism that will
// retrieve the MFA token. There is no default value for this field. If
// it is not set an error will be returned when creating the session.
//
	// This token provider will be called whenever the assumed role's
// credentials need to be refreshed. Within the context of service clients
// all sharing the same session the SDK will ensure calls to the token
// provider are atomic. When sharing a token provider across multiple
// sessions additional synchronization logic is needed to ensure the
	// token providers do not introduce race conditions. It is recommended to
// share the session where possible.
//
// stscreds.StdinTokenProvider is a basic implementation that will prompt
// from stdin for the MFA token code.
//
// This field is only used if the shared configuration is enabled, and
	// the config enables assume role with MFA via the mfa_serial field.
AssumeRoleTokenProvider func() (string, error)
	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
// the SDK will use instead of the default system's root CA bundle. Use this
// only if you want to replace the CA bundle the SDK uses for TLS requests.
//
// Enabling this option will attempt to merge the Transport into the SDK's HTTP
// client. If the client's Transport is not a http.Transport an error will be
// returned. If the Transport's TLS config is set this option will cause the SDK
// to overwrite the Transport's TLS config's RootCAs value. If the CA
// bundle reader contains multiple certificates all of them will be loaded.
//
// The Session option CustomCABundle is also available when creating sessions
// to also enable this feature. CustomCABundle session option field has priority
// over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
CustomCABundle io.Reader
}
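// exampleSharedConfigWithMFA is an illustrative sketch added by the editor and
// is not part of the original source: it wires AssumeRoleTokenProvider to
// stscreds.StdinTokenProvider (referred to in the field documentation above)
// while forcing shared config support on.
func exampleSharedConfigWithMFA() (*Session, error) {
	return NewSessionWithOptions(Options{
		SharedConfigState:       SharedConfigEnable,
		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
	})
}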
// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
// environment, and user provided config files. This func uses the Options
// values to configure how the Session is created.
//
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
// the shared config file (~/.aws/config) will also be loaded in addition to
// the shared credentials file (~/.aws/credentials). Values set in both the
// shared config, and shared credentials will be taken from the shared
// credentials file. Enabling the Shared Config will also allow the Session
// to be built with retrieving credentials with AssumeRole set in the config.
//
// // Equivalent to session.New
// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
//
// // Specify profile to load for the session's config
// sess := session.Must(session.NewSessionWithOptions(session.Options{
// Profile: "profile_name",
// }))
//
// // Specify profile for config and region for requests
// sess := session.Must(session.NewSessionWithOptions(session.Options{
// Config: aws.Config{Region: aws.String("us-east-1")},
// Profile: "profile_name",
// }))
//
// // Force enable Shared Config support
// sess := session.Must(session.NewSessionWithOptions(session.Options{
// SharedConfigState: session.SharedConfigEnable,
// }))
func NewSessionWithOptions(opts Options) (*Session, error) {
var envCfg envConfig
if opts.SharedConfigState == SharedConfigEnable {
envCfg = loadSharedEnvConfig()
} else {
envCfg = loadEnvConfig()
}
if len(opts.Profile) > 0 {
envCfg.Profile = opts.Profile
}
switch opts.SharedConfigState {
case SharedConfigDisable:
envCfg.EnableSharedConfig = false
case SharedConfigEnable:
envCfg.EnableSharedConfig = true
}
// Only use AWS_CA_BUNDLE if session option is not provided.
if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
f, err := os.Open(envCfg.CustomCABundle)
if err != nil {
return nil, awserr.New("LoadCustomCABundleError",
"failed to open custom CA bundle PEM file", err)
}
defer f.Close()
opts.CustomCABundle = f
}
return newSession(opts, envCfg, &opts.Config)
}
// Must is a helper function to ensure the Session is valid and there was no
// error when calling a NewSession function.
//
// This helper is intended to be used in variable initialization to load the
// Session and configuration at startup. Such as:
//
// var sess = session.Must(session.NewSession())
func Must(sess *Session, err error) *Session {
if err != nil {
panic(err)
}
return sess
}
func deprecatedNewSession(cfgs ...*aws.Config) *Session {
cfg := defaults.Config()
handlers := defaults.Handlers()
// Apply the passed in configs so the configuration can be applied to the
// default credential chain
cfg.MergeIn(cfgs...)
if cfg.EndpointResolver == nil {
// An endpoint resolver is required for a session to be able to provide
// endpoints for service client configurations.
cfg.EndpointResolver = endpoints.DefaultResolver()
}
cfg.Credentials = defaults.CredChain(cfg, handlers)
// Reapply any passed in configs to override credentials if set
cfg.MergeIn(cfgs...)
s := &Session{
Config: cfg,
Handlers: handlers,
}
initHandlers(s)
return s
}
func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
logger.Log("Enabling CSM")
if len(port) == 0 {
port = csm.DefaultPort
}
r, err := csm.Start(clientID, "127.0.0.1:"+port)
if err != nil {
return
}
r.InjectHandlers(handlers)
}
func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
cfg := defaults.Config()
handlers := defaults.Handlers()
// Get a merged version of the user provided config to determine if
	// credentials were set.
userCfg := &aws.Config{}
userCfg.MergeIn(cfgs...)
// Ordered config files will be loaded in with later files overwriting
// previous config file values.
var cfgFiles []string
if opts.SharedConfigFiles != nil {
cfgFiles = opts.SharedConfigFiles
} else {
cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
if !envCfg.EnableSharedConfig {
// The shared config file (~/.aws/config) is only loaded if instructed
// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
cfgFiles = cfgFiles[1:]
}
}
// Load additional config from file(s)
sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
if err != nil {
return nil, err
}
if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
return nil, err
}
s := &Session{
Config: cfg,
Handlers: handlers,
}
initHandlers(s)
if envCfg.CSMEnabled {
enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
}
// Setup HTTP client with custom cert bundle if enabled
if opts.CustomCABundle != nil {
if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
return nil, err
}
}
return s, nil
}
func loadCustomCABundle(s *Session, bundle io.Reader) error {
var t *http.Transport
switch v := s.Config.HTTPClient.Transport.(type) {
case *http.Transport:
t = v
default:
if s.Config.HTTPClient.Transport != nil {
return awserr.New("LoadCustomCABundleError",
"unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
}
}
if t == nil {
t = &http.Transport{}
}
p, err := loadCertPool(bundle)
if err != nil {
return err
}
if t.TLSClientConfig == nil {
t.TLSClientConfig = &tls.Config{}
}
t.TLSClientConfig.RootCAs = p
s.Config.HTTPClient.Transport = t
return nil
}
func loadCertPool(r io.Reader) (*x509.CertPool, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, awserr.New("LoadCustomCABundleError",
"failed to read custom CA bundle PEM file", err)
}
p := x509.NewCertPool()
if !p.AppendCertsFromPEM(b) {
return nil, awserr.New("LoadCustomCABundleError",
"failed to load custom CA bundle PEM file", err)
}
return p, nil
}
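// exampleCustomCABundle is an illustrative sketch added by the editor and is
// not part of the original source: it feeds a custom CA bundle into
// NewSessionWithOptions so that loadCustomCABundle and loadCertPool above are
// exercised. The PEM file path is a placeholder.
func exampleCustomCABundle() (*Session, error) {
	f, err := os.Open("/path/to/ca-bundle.pem") // placeholder path
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return NewSessionWithOptions(Options{CustomCABundle: f})
}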
func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
// Merge in user provided configuration
cfg.MergeIn(userCfg)
// Region if not already set by user
if len(aws.StringValue(cfg.Region)) == 0 {
if len(envCfg.Region) > 0 {
cfg.WithRegion(envCfg.Region)
} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
cfg.WithRegion(sharedCfg.Region)
}
}
// Configure credentials if not already set
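	// (Editor note, added for clarity) The branches below are evaluated in
	// order: static credentials from the environment, an assume-role profile
	// from the shared config, static credentials from the shared files, and
	// finally a fallback credential chain that records why earlier providers
	// failed.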
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
if len(envCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
)
} else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
cfgCp := *cfg
cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.AssumeRoleSource.Creds,
)
if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
// AssumeRole Token provider is required if doing Assume Role
// with MFA.
return AssumeRoleTokenProviderNotSetError{}
}
cfg.Credentials = stscreds.NewCredentials(
&Session{
Config: &cfgCp,
Handlers: handlers.Copy(),
},
sharedCfg.AssumeRole.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
// Assume role with external ID
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
}
// Assume role with MFA
if len(sharedCfg.AssumeRole.MFASerial) > 0 {
opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
}
},
)
} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.Creds,
)
} else {
// Fallback to default credentials provider, include mock errors
// for the credential chain so user can identify why credentials
// failed to be retrieved.
cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
Providers: []credentials.Provider{
&credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
&credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
defaults.RemoteCredProvider(*cfg, handlers),
},
})
}
}
return nil
}
// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the
// MFAToken option is not set when shared config is configured to assume a
// role with an MFA token.
type AssumeRoleTokenProviderNotSetError struct{}
// Code is the short id of the error.
func (e AssumeRoleTokenProviderNotSetError) Code() string {
return "AssumeRoleTokenProviderNotSetError"
}
// Message is the description of the error
func (e AssumeRoleTokenProviderNotSetError) Message() string {
	return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
}
// OrigErr is the underlying error that caused the failure.
func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
return nil
}
// Error satisfies the error interface.
func (e AssumeRoleTokenProviderNotSetError) Error() string {
return awserr.SprintError(e.Code(), e.Message(), "", nil)
}
type credProviderError struct {
Err error
}
var emptyCreds = credentials.Value{}
func (c credProviderError) Retrieve() (credentials.Value, error) {
return credentials.Value{}, c.Err
}
func (c credProviderError) IsExpired() bool {
return true
}
func initHandlers(s *Session) {
// Add the Validate parameter handler if it is not disabled.
s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
if !aws.BoolValue(s.Config.DisableParamValidation) {
s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
}
}
// Copy creates and returns a copy of the current Session, copying the config
// and handlers. If any additional configs are provided they will be merged
// on top of the Session's copied config.
//
// // Create a copy of the current Session, configured for the us-west-2 region.
// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
newSession := &Session{
Config: s.Config.Copy(cfgs...),
Handlers: s.Handlers.Copy(),
}
initHandlers(newSession)
return newSession
}
// ClientConfig satisfies the client.ConfigProvider interface and is used to
// configure the service client instances. Passing the Session to the service
// client's constructor (New) will use this method to configure the client.
func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
// Backwards compatibility, the error will be eaten if user calls ClientConfig
	// directly. All SDK services will use clientConfigWithErr.
cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
return cfg
}
func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
s = s.Copy(cfgs...)
var resolved endpoints.ResolvedEndpoint
var err error
region := aws.StringValue(s.Config.Region)
if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
resolved.SigningRegion = region
} else {
resolved, err = s.Config.EndpointResolver.EndpointFor(
serviceName, region,
func(opt *endpoints.Options) {
opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
// Support the condition where the service is modeled but its
// endpoint metadata is not available.
opt.ResolveUnknownService = true
},
)
}
return client.Config{
Config: s.Config,
Handlers: s.Handlers,
Endpoint: resolved.URL,
SigningRegion: resolved.SigningRegion,
SigningNameDerived: resolved.SigningNameDerived,
SigningName: resolved.SigningName,
}, err
}
// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
// that the EndpointResolver will not be used to resolve the endpoint. The only
// endpoint set must come from the aws.Config.Endpoint field.
func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
s = s.Copy(cfgs...)
var resolved endpoints.ResolvedEndpoint
region := aws.StringValue(s.Config.Region)
if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
resolved.SigningRegion = region
}
return client.Config{
Config: s.Config,
Handlers: s.Handlers,
Endpoint: resolved.URL,
SigningRegion: resolved.SigningRegion,
SigningNameDerived: resolved.SigningNameDerived,
SigningName: resolved.SigningName,
}
}
| 1 | 9,389 | Should this also validate that the RoleArn env var is provided, or just let the creds fail? | aws-aws-sdk-go | go |
@@ -20,10 +20,12 @@
package transport
+import "github.com/opentracing/opentracing-go"
+
// Deps is the interface of any object useful for passing injected
// dependencies into inbound and outbound transports.
type Deps interface {
- // Tracer() opentracing.Tracer
+ Tracer() opentracing.Tracer
}
// NoDeps is a no-op implementation of Deps | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package transport
// Deps is the interface of any object useful for passing injected
// dependencies into inbound and outbound transports.
type Deps interface {
// Tracer() opentracing.Tracer
}
// NoDeps is a no-op implementation of Deps
var NoDeps noDeps
type noDeps struct{}
// func (d noDeps) Tracer() opentracing.Tracer {
// return opentracing.NoopTracer{}
// }
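// Illustrative sketch added by the editor, not part of the original file: once
// the Tracer method above is restored, noDeps could defer to the process-wide
// tracer instead of constructing its own no-op instance, e.g.:
//
//	func (d noDeps) Tracer() opentracing.Tracer {
//		return opentracing.GlobalTracer()
//	}
//
// opentracing.GlobalTracer() returns a no-op tracer unless a real tracer has
// been registered (e.g. via opentracing.InitGlobalTracer).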
| 1 | 10,074 | fwiw, in tchannel-go I've implemented a similar method on TChannel that either returns a Tracer instance it was initialized with, or returns `opentracing.GlobalTracer()`, which by default happens to return a singleton instance of `NoopTracer`. In Go the use of global variables is not frowned upon as say in Java, so this pattern allows minimal work to enable tracing in a service, one just needs to store a real tracer in the global variable, e.g. via `opentracing.InitGlobalTracer(someTracerImpl)`, while the instances of TChannel do not need to be explicitly given any tracer since they will default to that global. | yarpc-yarpc-go | go |
@@ -135,6 +135,10 @@ class SeriesTest(ReusedSQLTestCase, SQLTestUtils):
self.assertEqual(kidx.name, "renamed")
self.assert_eq(kidx, pidx)
+ expected_error_message = "Series.name must be a hashable type"
+ with self.assertRaisesRegex(TypeError, expected_error_message):
+ kser.name = ["0", "1"]
+
def test_rename_method(self):
# Series name
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import unittest
from collections import defaultdict
from distutils.version import LooseVersion
import inspect
from io import BytesIO
from itertools import product
from datetime import datetime, timedelta
import matplotlib
matplotlib.use("agg")
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import pyspark
from pyspark.ml.linalg import SparseVector
from databricks import koalas as ks
from databricks.koalas import Series
from databricks.koalas.testing.utils import (
ReusedSQLTestCase,
SQLTestUtils,
SPARK_CONF_ARROW_ENABLED,
)
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.series import MissingPandasLikeSeries
class SeriesTest(ReusedSQLTestCase, SQLTestUtils):
@property
def pser(self):
return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
@property
def kser(self):
return ks.from_pandas(self.pser)
def test_series(self):
kser = self.kser
self.assertTrue(isinstance(kser, Series))
self.assert_eq(kser + 1, self.pser + 1)
def test_series_tuple_name(self):
pser = self.pser
pser.name = ("x", "a")
kser = ks.from_pandas(pser)
self.assert_eq(kser, pser)
self.assert_eq(kser.name, pser.name)
pser.name = ("y", "z")
kser.name = ("y", "z")
self.assert_eq(kser, pser)
self.assert_eq(kser.name, pser.name)
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
s = ks.range(10)["id"]
s.__repr__()
s.rename("a", inplace=True)
self.assertEqual(s.__repr__(), s.rename("a").__repr__())
def test_empty_series(self):
a = pd.Series([], dtype="i1")
b = pd.Series([], dtype="str")
self.assert_eq(ks.from_pandas(a), a)
self.assertRaises(ValueError, lambda: ks.from_pandas(b))
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(ks.from_pandas(a), a)
self.assertRaises(ValueError, lambda: ks.from_pandas(b))
def test_all_null_series(self):
a = pd.Series([None, None, None], dtype="float64")
b = pd.Series([None, None, None], dtype="str")
self.assert_eq(ks.from_pandas(a).dtype, a.dtype)
self.assertTrue(ks.from_pandas(a).to_pandas().isnull().all())
self.assertRaises(ValueError, lambda: ks.from_pandas(b))
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(ks.from_pandas(a).dtype, a.dtype)
self.assertTrue(ks.from_pandas(a).to_pandas().isnull().all())
self.assertRaises(ValueError, lambda: ks.from_pandas(b))
def test_head(self):
kser = self.kser
pser = self.pser
self.assert_eq(kser.head(3), pser.head(3))
self.assert_eq(kser.head(0), pser.head(0))
self.assert_eq(kser.head(-3), pser.head(-3))
self.assert_eq(kser.head(-10), pser.head(-10))
def test_rename(self):
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
kser = ks.from_pandas(pser)
pser.name = "renamed"
kser.name = "renamed"
self.assertEqual(kser.name, "renamed")
self.assert_eq(kser, pser)
# pser.name = None
# kser.name = None
# self.assertEqual(kser.name, None)
# self.assert_eq(kser, pser)
pidx = pser.index
kidx = kser.index
pidx.name = "renamed"
kidx.name = "renamed"
self.assertEqual(kidx.name, "renamed")
self.assert_eq(kidx, pidx)
def test_rename_method(self):
# Series name
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
kser = ks.from_pandas(pser)
self.assert_eq(kser.rename("y"), pser.rename("y"))
self.assertEqual(kser.name, "x") # no mutation
self.assert_eq(kser.rename(), pser.rename())
self.assert_eq((kser.rename("y") + 1).head(), (pser.rename("y") + 1).head())
kser.rename("z", inplace=True)
pser.rename("z", inplace=True)
self.assertEqual(kser.name, "z")
self.assert_eq(kser, pser)
# Series index
# pser = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x')
# kser = ks.from_pandas(s)
# TODO: index
# res = kser.rename(lambda x: x ** 2)
# self.assert_eq(res, pser.rename(lambda x: x ** 2))
# res = kser.rename(pser)
# self.assert_eq(res, pser.rename(pser))
# res = kser.rename(kser)
# self.assert_eq(res, pser.rename(pser))
# res = kser.rename(lambda x: x**2, inplace=True)
# self.assertis(res, kser)
# s.rename(lambda x: x**2, inplace=True)
# self.assert_eq(kser, pser)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
pser = pd.Series([1.0, 2.0, 3.0], index=index, name="name")
kser = ks.from_pandas(pser)
self.assert_eq(
pser.rename_axis("index2").sort_index(), kser.rename_axis("index2").sort_index(),
)
self.assert_eq(
(pser + 1).rename_axis("index2").sort_index(),
(kser + 1).rename_axis("index2").sort_index(),
)
pser2 = pser.copy()
kser2 = kser.copy()
pser2.rename_axis("index2", inplace=True)
kser2.rename_axis("index2", inplace=True)
self.assert_eq(pser2.sort_index(), kser2.sort_index())
self.assertRaises(ValueError, lambda: kser.rename_axis(["index2", "index3"]))
self.assertRaises(TypeError, lambda: kser.rename_axis(mapper=["index2"], index=["index3"]))
# index/columns parameters and dict_like/functions mappers introduced in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(
pser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index(),
kser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index(),
)
self.assert_eq(
pser.rename_axis(index=str.upper).sort_index(),
kser.rename_axis(index=str.upper).sort_index(),
)
else:
expected = kser
expected.index.name = "index2"
result = kser.rename_axis(index={"index": "index2", "missing": "index4"}).sort_index()
self.assert_eq(expected, result)
expected = kser
expected.index.name = "INDEX"
result = kser.rename_axis(index=str.upper).sort_index()
self.assert_eq(expected, result)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
pser = pd.Series([1.0, 2.0, 3.0], index=index, name="name")
kser = ks.from_pandas(pser)
self.assert_eq(
pser.rename_axis(["index3", "index4"]).sort_index(),
kser.rename_axis(["index3", "index4"]).sort_index(),
)
self.assertRaises(ValueError, lambda: kser.rename_axis(["index3", "index4", "index5"]))
# index/columns parameters and dict_like/functions mappers introduced in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(
pser.rename_axis(
index={"index1": "index3", "index2": "index4", "missing": "index5"}
).sort_index(),
kser.rename_axis(
index={"index1": "index3", "index2": "index4", "missing": "index5"}
).sort_index(),
)
self.assert_eq(
pser.rename_axis(index=str.upper).sort_index(),
kser.rename_axis(index=str.upper).sort_index(),
)
else:
expected = kser
expected.index.names = ["index3", "index4"]
result = kser.rename_axis(
index={"index1": "index3", "index2": "index4", "missing": "index5"}
).sort_index()
self.assert_eq(expected, result)
expected.index.names = ["INDEX1", "INDEX2"]
result = kser.rename_axis(index=str.upper).sort_index()
self.assert_eq(expected, result)
def test_or(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf["left"] | pdf["right"], kdf["left"] | kdf["right"])
def test_and(self):
pdf = pd.DataFrame(
{
"left": [True, False, True, False, np.nan, np.nan, True, False, np.nan],
"right": [True, False, False, True, True, False, np.nan, np.nan, np.nan],
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
pdf["left"] & pdf["right"], kdf["left"] & kdf["right"],
)
def test_to_numpy(self):
pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
kser = ks.from_pandas(pser)
self.assert_eq(kser.to_numpy(), pser.values)
def test_isin(self):
pser = pd.Series(["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal")
kser = ks.from_pandas(pser)
self.assert_eq(kser.isin(["cow", "lama"]), pser.isin(["cow", "lama"]))
self.assert_eq(kser.isin({"cow"}), pser.isin({"cow"}))
msg = "only list-like objects are allowed to be passed to isin()"
with self.assertRaisesRegex(TypeError, msg):
kser.isin(1)
def test_drop_duplicates(self):
pdf = pd.DataFrame({"animal": ["lama", "cow", "lama", "beetle", "lama", "hippo"]})
kdf = ks.from_pandas(pdf)
pser = pdf.animal
kser = kdf.animal
self.assert_eq(kser.drop_duplicates().sort_index(), pser.drop_duplicates().sort_index())
self.assert_eq(
kser.drop_duplicates(keep="last").sort_index(),
pser.drop_duplicates(keep="last").sort_index(),
)
# inplace
kser.drop_duplicates(keep=False, inplace=True)
pser.drop_duplicates(keep=False, inplace=True)
self.assert_eq(kser.sort_index(), pser.sort_index())
self.assert_eq(kdf, pdf)
def test_reindex(self):
index = ["A", "B", "C", "D", "E"]
pser = pd.Series([1.0, 2.0, 3.0, 4.0, None], index=index, name="x")
kser = ks.from_pandas(pser)
self.assert_eq(pser, kser)
self.assert_eq(
pser.reindex(["A", "B"]).sort_index(), kser.reindex(["A", "B"]).sort_index(),
)
self.assert_eq(
pser.reindex(["A", "B", "2", "3"]).sort_index(),
kser.reindex(["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pser.reindex(["A", "E", "2"], fill_value=0).sort_index(),
kser.reindex(["A", "E", "2"], fill_value=0).sort_index(),
)
self.assertRaises(TypeError, lambda: kser.reindex(index=123))
def test_fillna(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.fillna(0), pser.fillna(0))
self.assert_eq(kser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0))
kser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
# test considering series does not have NA/NaN values
kser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(kser, pser)
kser = kdf.x.rename("y")
pser = pdf.x.rename("y")
kser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(kser.head(), pser.head())
pser = pd.Series([1, 2, 3, 4, 5, 6], name="x")
kser = ks.from_pandas(pser)
pser.loc[3] = np.nan
kser.loc[3] = np.nan
self.assert_eq(kser.fillna(0), pser.fillna(0))
self.assert_eq(kser.fillna(method="ffill"), pser.fillna(method="ffill"))
self.assert_eq(kser.fillna(method="bfill"), pser.fillna(method="bfill"))
# inplace fillna on non-nullable column
pdf = pd.DataFrame({"a": [1, 2, None], "b": [1, 2, 3]})
kdf = ks.from_pandas(pdf)
pser = pdf.b
kser = kdf.b
self.assert_eq(kser.fillna(0), pser.fillna(0))
self.assert_eq(kser.fillna(np.nan).fillna(0), pser.fillna(np.nan).fillna(0))
kser.fillna(0, inplace=True)
pser.fillna(0, inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
def test_dropna(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.dropna(), pser.dropna())
pser.dropna(inplace=True)
kser.dropna(inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
def test_nunique(self):
pser = pd.Series([1, 2, 1, np.nan])
kser = ks.from_pandas(pser)
# Assert NaNs are dropped by default
nunique_result = kser.nunique()
self.assertEqual(nunique_result, 2)
self.assert_eq(nunique_result, pser.nunique())
# Assert including NaN values
nunique_result = kser.nunique(dropna=False)
self.assertEqual(nunique_result, 3)
self.assert_eq(nunique_result, pser.nunique(dropna=False))
# Assert approximate counts
self.assertEqual(ks.Series(range(100)).nunique(approx=True), 103)
self.assertEqual(ks.Series(range(100)).nunique(approx=True, rsd=0.01), 100)
def _test_value_counts(self):
# this is also containing test for Index & MultiIndex
pser = pd.Series(
[1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3],
index=[1, 2, 1, 3, 3, np.nan, 1, 4, 2, np.nan, 3, np.nan, 3, 1, 3],
name="x",
)
kser = ks.from_pandas(pser)
exp = pser.value_counts()
res = kser.value_counts()
self.assertEqual(res.name, exp.name)
self.assert_eq(res, exp)
self.assert_eq(kser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(kser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
kser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
with self.assertRaisesRegex(
NotImplementedError, "value_counts currently does not support bins"
):
kser.value_counts(bins=3)
pser.name = "index"
kser.name = "index"
self.assert_eq(kser.value_counts(), pser.value_counts())
# Series from DataFrame
pdf = pd.DataFrame({"a": [2, 2, 3], "b": [None, 1, None]})
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.a.value_counts(normalize=True), pdf.a.value_counts(normalize=True))
self.assert_eq(kdf.a.value_counts(ascending=True), pdf.a.value_counts(ascending=True))
self.assert_eq(
kdf.a.value_counts(normalize=True, dropna=False),
pdf.a.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kdf.a.value_counts(ascending=True, dropna=False),
pdf.a.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
# Series with NaN index
pser = pd.Series([3, 2, 3, 1, 2, 3], index=[2.0, None, 5.0, 5.0, None, 5.0])
kser = ks.from_pandas(pser)
self.assert_eq(kser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(kser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
kser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True)
)
self.assert_eq(
kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True)
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
)
# Series with MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
kser = ks.from_pandas(pser)
self.assert_eq(kser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(kser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
kser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
kser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
# Series with MultiIndex some of index has NaN
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", None), ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
kser = ks.from_pandas(pser)
self.assert_eq(kser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(kser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
kser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
kser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
# Series with MultiIndex some of index is NaN.
# This test only available for pandas >= 0.24.
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
pser.index = pd.MultiIndex.from_tuples(
[("x", "a"), None, ("y", "c"), ("x", "a"), ("y", "c"), ("x", "a")]
)
kser = ks.from_pandas(pser)
self.assert_eq(kser.value_counts(normalize=True), pser.value_counts(normalize=True))
self.assert_eq(kser.value_counts(ascending=True), pser.value_counts(ascending=True))
self.assert_eq(
kser.value_counts(normalize=True, dropna=False),
pser.value_counts(normalize=True, dropna=False),
)
self.assert_eq(
kser.value_counts(ascending=True, dropna=False),
pser.value_counts(ascending=True, dropna=False),
)
# FIXME: MultiIndex.value_counts returns wrong indices.
self.assert_eq(
kser.index.value_counts(normalize=True),
pser.index.value_counts(normalize=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True),
pser.index.value_counts(ascending=True),
almost=True,
)
self.assert_eq(
kser.index.value_counts(normalize=True, dropna=False),
pser.index.value_counts(normalize=True, dropna=False),
almost=True,
)
self.assert_eq(
kser.index.value_counts(ascending=True, dropna=False),
pser.index.value_counts(ascending=True, dropna=False),
almost=True,
)
def test_value_counts(self):
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self._test_value_counts()
self.assertRaises(
RuntimeError,
lambda: ks.MultiIndex.from_tuples([("x", "a"), ("x", "b")]).value_counts(),
)
else:
self._test_value_counts()
def test_nsmallest(self):
sample_lst = [1, 2, 3, 4, np.nan, 6]
pser = pd.Series(sample_lst, name="x")
kser = ks.Series(sample_lst, name="x")
self.assert_eq(kser.nsmallest(n=3), pser.nsmallest(n=3))
self.assert_eq(kser.nsmallest(), pser.nsmallest())
self.assert_eq((kser + 1).nsmallest(), (pser + 1).nsmallest())
def test_nlargest(self):
sample_lst = [1, 2, 3, 4, np.nan, 6]
pser = pd.Series(sample_lst, name="x")
kser = ks.Series(sample_lst, name="x")
self.assert_eq(kser.nlargest(n=3), pser.nlargest(n=3))
self.assert_eq(kser.nlargest(), pser.nlargest())
self.assert_eq((kser + 1).nlargest(), (pser + 1).nlargest())
def test_isnull(self):
pser = pd.Series([1, 2, 3, 4, np.nan, 6], name="x")
kser = ks.from_pandas(pser)
self.assert_eq(kser.notnull(), pser.notnull())
self.assert_eq(kser.isnull(), pser.isnull())
pser = self.pser
kser = self.kser
self.assert_eq(kser.notnull(), pser.notnull())
self.assert_eq(kser.isnull(), pser.isnull())
def test_all(self):
for pser in [
pd.Series([True, True], name="x"),
pd.Series([True, False], name="x"),
pd.Series([0, 1], name="x"),
pd.Series([1, 2, 3], name="x"),
pd.Series([True, True, None], name="x"),
pd.Series([True, False, None], name="x"),
pd.Series([], name="x"),
pd.Series([np.nan], name="x"),
]:
kser = ks.from_pandas(pser)
self.assert_eq(kser.all(), pser.all())
pser = pd.Series([1, 2, 3, 4], name="x")
kser = ks.from_pandas(pser)
self.assert_eq((kser % 2 == 0).all(), (pser % 2 == 0).all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
kser.all(axis=1)
def test_any(self):
for pser in [
pd.Series([False, False], name="x"),
pd.Series([True, False], name="x"),
pd.Series([0, 1], name="x"),
pd.Series([1, 2, 3], name="x"),
pd.Series([True, True, None], name="x"),
pd.Series([True, False, None], name="x"),
pd.Series([], name="x"),
pd.Series([np.nan], name="x"),
]:
kser = ks.from_pandas(pser)
self.assert_eq(kser.any(), pser.any())
pser = pd.Series([1, 2, 3, 4], name="x")
kser = ks.from_pandas(pser)
self.assert_eq((kser % 2 == 0).any(), (pser % 2 == 0).any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
kser.any(axis=1)
def test_reset_index(self):
pdf = pd.DataFrame({"foo": [1, 2, 3, 4]}, index=pd.Index(["a", "b", "c", "d"], name="idx"))
kdf = ks.from_pandas(pdf)
pser = pdf.foo
kser = kdf.foo
self.assert_eq(kser.reset_index(), pser.reset_index())
self.assert_eq(kser.reset_index(name="values"), pser.reset_index(name="values"))
self.assert_eq(kser.reset_index(drop=True), pser.reset_index(drop=True))
# inplace
kser.reset_index(drop=True, inplace=True)
pser.reset_index(drop=True, inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
def test_reset_index_with_default_index_types(self):
pser = pd.Series([1, 2, 3], name="0", index=np.random.rand(3))
kser = ks.from_pandas(pser)
with ks.option_context("compute.default_index_type", "sequence"):
self.assert_eq(kser.reset_index(), pser.reset_index())
with ks.option_context("compute.default_index_type", "distributed-sequence"):
# the order might be changed.
self.assert_eq(kser.reset_index().sort_index(), pser.reset_index())
with ks.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(
kser.reset_index().to_pandas().reset_index(drop=True), pser.reset_index()
)
def test_sort_values(self):
pdf = pd.DataFrame({"x": [1, 2, 3, 4, 5, None, 7]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.sort_values(), pser.sort_values())
self.assert_eq(kser.sort_values(ascending=False), pser.sort_values(ascending=False))
self.assert_eq(kser.sort_values(na_position="first"), pser.sort_values(na_position="first"))
self.assertRaises(ValueError, lambda: kser.sort_values(na_position="invalid"))
# inplace
# pandas raises an exception when the Series is derived from DataFrame
kser.sort_values(inplace=True)
self.assert_eq(kser, pser.sort_values())
self.assert_eq(kdf, pdf)
pser = pdf.x.copy()
kser = kdf.x.copy()
kser.sort_values(inplace=True)
pser.sort_values(inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
def test_sort_index(self):
pdf = pd.DataFrame({"x": [2, 1, np.nan]}, index=["b", "a", np.nan])
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: kser.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: kser.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: kser.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(kser.sort_index(), pser.sort_index())
# Assert sorting descending
self.assert_eq(kser.sort_index(ascending=False), pser.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(kser.sort_index(na_position="first"), pser.sort_index(na_position="first"))
# Assert sorting inplace
# pandas sorts pdf.x by the index and update the column only
# when the Series is derived from DataFrame.
kser.sort_index(inplace=True)
self.assert_eq(kser, pser.sort_index())
self.assert_eq(kdf, pdf)
pser = pdf.x.copy()
kser = kdf.x.copy()
kser.sort_index(inplace=True)
pser.sort_index(inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
# Assert multi-indices
pser = pd.Series(range(4), index=[["b", "b", "a", "a"], [1, 0, 1, 0]], name="0")
kser = ks.from_pandas(pser)
self.assert_eq(kser.sort_index(), pser.sort_index())
self.assert_eq(kser.sort_index(level=[1, 0]), pser.sort_index(level=[1, 0]))
self.assert_eq(kser.reset_index().sort_index(), pser.reset_index().sort_index())
def test_to_datetime(self):
pser = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 100)
kser = ks.from_pandas(pser)
self.assert_eq(
pd.to_datetime(pser, infer_datetime_format=True),
ks.to_datetime(kser, infer_datetime_format=True),
)
def test_missing(self):
kser = self.kser
missing_functions = inspect.getmembers(MissingPandasLikeSeries, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kser, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Series.*{}.*is deprecated".format(name)
):
getattr(kser, name)()
missing_properties = inspect.getmembers(
MissingPandasLikeSeries, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kser, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Series.*{}.*is deprecated".format(name)
):
getattr(kser, name)
def test_clip(self):
pser = pd.Series([0, 2, 4], index=np.random.rand(3))
kser = ks.from_pandas(pser)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(ValueError, msg=msg):
kser.clip(lower=[1])
with self.assertRaises(ValueError, msg=msg):
kser.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(kser.clip(), pser.clip())
# Assert lower only
self.assert_eq(kser.clip(1), pser.clip(1))
# Assert upper only
self.assert_eq(kser.clip(upper=3), pser.clip(upper=3))
# Assert lower and upper
self.assert_eq(kser.clip(1, 3), pser.clip(1, 3))
# Assert behavior on string values
str_kser = ks.Series(["a", "b", "c"])
self.assert_eq(str_kser.clip(1, 3), str_kser)
def test_is_unique(self):
# We can't use pandas' is_unique for comparison. pandas 0.23 ignores None
pser = pd.Series([1, 2, 2, None, None])
kser = ks.from_pandas(pser)
self.assertEqual(False, kser.is_unique)
self.assertEqual(False, (kser + 1).is_unique)
pser = pd.Series([1, None, None])
kser = ks.from_pandas(pser)
self.assertEqual(False, kser.is_unique)
self.assertEqual(False, (kser + 1).is_unique)
pser = pd.Series([1])
kser = ks.from_pandas(pser)
self.assertEqual(pser.is_unique, kser.is_unique)
self.assertEqual((pser + 1).is_unique, (kser + 1).is_unique)
pser = pd.Series([1, 1, 1])
kser = ks.from_pandas(pser)
self.assertEqual(pser.is_unique, kser.is_unique)
self.assertEqual((pser + 1).is_unique, (kser + 1).is_unique)
def test_to_list(self):
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
self.assert_eq(self.kser.to_list(), self.pser.to_list())
else:
self.assert_eq(self.kser.tolist(), self.pser.tolist())
def test_append(self):
pser1 = pd.Series([1, 2, 3], name="0")
pser2 = pd.Series([4, 5, 6], name="0")
pser3 = pd.Series([4, 5, 6], index=[3, 4, 5], name="0")
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
kser3 = ks.from_pandas(pser3)
self.assert_eq(kser1.append(kser2), pser1.append(pser2))
self.assert_eq(kser1.append(kser3), pser1.append(pser3))
self.assert_eq(
kser1.append(kser2, ignore_index=True), pser1.append(pser2, ignore_index=True)
)
kser1.append(kser3, verify_integrity=True)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
kser1.append(kser2, verify_integrity=True)
def test_map(self):
pser = pd.Series(["cat", "dog", None, "rabbit"])
kser = ks.from_pandas(pser)
# Currently Koalas doesn't return NaN as pandas does.
self.assert_eq(kser.map({}), pser.map({}).replace({pd.np.nan: None}))
d = defaultdict(lambda: "abc")
self.assertTrue("abc" in repr(kser.map(d)))
self.assert_eq(kser.map(d), pser.map(d))
def tomorrow(date) -> datetime:
return date + timedelta(days=1)
pser = pd.Series([datetime(2019, 10, 24)])
kser = ks.from_pandas(pser)
self.assert_eq(kser.map(tomorrow), pser.map(tomorrow))
def test_add_prefix(self):
pser = pd.Series([1, 2, 3, 4], name="0")
kser = ks.from_pandas(pser)
self.assert_eq(pser.add_prefix("item_"), kser.add_prefix("item_"))
pser = pd.Series(
[1, 2, 3],
name="0",
index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]),
)
kser = ks.from_pandas(pser)
self.assert_eq(pser.add_prefix("item_"), kser.add_prefix("item_"))
def test_add_suffix(self):
pser = pd.Series([1, 2, 3, 4], name="0")
kser = ks.from_pandas(pser)
self.assert_eq(pser.add_suffix("_item"), kser.add_suffix("_item"))
pser = pd.Series(
[1, 2, 3],
name="0",
index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]),
)
kser = ks.from_pandas(pser)
self.assert_eq(pser.add_suffix("_item"), kser.add_suffix("_item"))
def test_hist(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
kdf = ks.from_pandas(pdf)
def plot_to_base64(ax):
bytes_data = BytesIO()
ax.figure.savefig(bytes_data, format="png")
bytes_data.seek(0)
b64_data = base64.b64encode(bytes_data.read())
plt.close(ax.figure)
return b64_data
_, ax1 = plt.subplots(1, 1)
# Using plot.hist() because pandas changes ticks props when called hist()
ax1 = pdf["a"].plot.hist()
_, ax2 = plt.subplots(1, 1)
ax2 = kdf["a"].hist()
self.assert_eq(plot_to_base64(ax1), plot_to_base64(ax2))
def test_cummin(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
kser = ks.from_pandas(pser)
self.assert_eq(pser.cummin(), kser.cummin())
self.assert_eq(pser.cummin(skipna=False), kser.cummin(skipna=False))
self.assert_eq(pser.cummin().sum(), kser.cummin().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
kser = ks.from_pandas(pser)
self.assert_eq(pser.cummin(), kser.cummin())
self.assert_eq(pser.cummin(skipna=False), kser.cummin(skipna=False))
def test_cummax(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
kser = ks.from_pandas(pser)
self.assert_eq(pser.cummax(), kser.cummax())
self.assert_eq(pser.cummax(skipna=False), kser.cummax(skipna=False))
self.assert_eq(pser.cummax().sum(), kser.cummax().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
kser = ks.from_pandas(pser)
self.assert_eq(pser.cummax(), kser.cummax())
self.assert_eq(pser.cummax(skipna=False), kser.cummax(skipna=False))
def test_cumsum(self):
pser = pd.Series([1.0, None, 0.0, 4.0, 9.0])
kser = ks.from_pandas(pser)
self.assert_eq(pser.cumsum(), kser.cumsum())
self.assert_eq(pser.cumsum(skipna=False), kser.cumsum(skipna=False))
self.assert_eq(pser.cumsum().sum(), kser.cumsum().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
kser = ks.from_pandas(pser)
self.assert_eq(pser.cumsum(), kser.cumsum())
self.assert_eq(pser.cumsum(skipna=False), kser.cumsum(skipna=False))
def test_cumprod(self):
pser = pd.Series([1.0, None, 1.0, 4.0, 9.0])
kser = ks.from_pandas(pser)
self.assert_eq(pser.cumprod(), kser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), kser.cumprod(skipna=False))
self.assert_eq(pser.cumprod().sum(), kser.cumprod().sum())
# with integer type
pser = pd.Series([1, 10, 1, 4, 9])
kser = ks.from_pandas(pser)
self.assert_eq(pser.cumprod(), kser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), kser.cumprod(skipna=False))
self.assert_eq(pser.cumprod().sum(), kser.cumprod().sum())
# with reversed index
pser.index = [4, 3, 2, 1, 0]
kser = ks.from_pandas(pser)
self.assert_eq(pser.cumprod(), kser.cumprod())
self.assert_eq(pser.cumprod(skipna=False), kser.cumprod(skipna=False))
with self.assertRaisesRegex(Exception, "values should be bigger than 0"):
ks.Series([0, 1]).cumprod().to_pandas()
def test_median(self):
with self.assertRaisesRegex(ValueError, "accuracy must be an integer; however"):
ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).median(accuracy="a")
def test_rank(self):
pser = pd.Series([1, 2, 3, 1], name="x")
kser = ks.from_pandas(pser)
self.assert_eq(pser.rank(), kser.rank().sort_index())
self.assert_eq(pser.rank(ascending=False), kser.rank(ascending=False).sort_index())
self.assert_eq(pser.rank(method="min"), kser.rank(method="min").sort_index())
self.assert_eq(pser.rank(method="max"), kser.rank(method="max").sort_index())
self.assert_eq(pser.rank(method="first"), kser.rank(method="first").sort_index())
self.assert_eq(pser.rank(method="dense"), kser.rank(method="dense").sort_index())
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
kser.rank(method="nothing")
def test_round(self):
pser = pd.Series([0.028208, 0.038683, 0.877076], name="x")
kser = ks.from_pandas(pser)
self.assert_eq(pser.round(2), kser.round(2))
msg = "decimals must be an integer"
with self.assertRaisesRegex(ValueError, msg):
kser.round(1.5)
def test_quantile(self):
with self.assertRaisesRegex(ValueError, "accuracy must be an integer; however"):
ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(accuracy="a")
with self.assertRaisesRegex(ValueError, "q must be a float of an array of floats;"):
ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q="a")
with self.assertRaisesRegex(ValueError, "q must be a float of an array of floats;"):
ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q=["a"])
def test_idxmax(self):
pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
kser = ks.Series(pser)
self.assertEqual(kser.idxmax(), pser.idxmax())
self.assertEqual(kser.idxmax(skipna=False), pser.idxmax(skipna=False))
index = pd.MultiIndex.from_arrays(
[["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
)
pser = pd.Series(data=[1, 2, 4, 5], index=index)
kser = ks.Series(pser)
self.assertEqual(kser.idxmax(), pser.idxmax())
self.assertEqual(kser.idxmax(skipna=False), pser.idxmax(skipna=False))
kser = ks.Series([])
with self.assertRaisesRegex(ValueError, "an empty sequence"):
kser.idxmax()
pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
kser = ks.Series(pser)
self.assertEqual(kser.idxmax(), pser.idxmax())
self.assertEqual(repr(kser.idxmax(skipna=False)), repr(pser.idxmax(skipna=False)))
def test_idxmin(self):
pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"])
kser = ks.Series(pser)
self.assertEqual(kser.idxmin(), pser.idxmin())
self.assertEqual(kser.idxmin(skipna=False), pser.idxmin(skipna=False))
index = pd.MultiIndex.from_arrays(
[["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second")
)
pser = pd.Series(data=[1, 2, 4, 5], index=index)
kser = ks.Series(pser)
self.assertEqual(kser.idxmin(), pser.idxmin())
self.assertEqual(kser.idxmin(skipna=False), pser.idxmin(skipna=False))
kser = ks.Series([])
with self.assertRaisesRegex(ValueError, "an empty sequence"):
kser.idxmin()
pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
kser = ks.Series(pser)
self.assertEqual(kser.idxmin(), pser.idxmin())
self.assertEqual(repr(kser.idxmin(skipna=False)), repr(pser.idxmin(skipna=False)))
def test_shift(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
if LooseVersion(pd.__version__) < LooseVersion("0.24.2"):
self.assert_eq(kser.shift(periods=2), pser.shift(periods=2))
else:
self.assert_eq(kser.shift(periods=2, fill_value=0), pser.shift(periods=2, fill_value=0))
with self.assertRaisesRegex(ValueError, "periods should be an int; however"):
kser.shift(periods=1.5)
def test_astype(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.astype(np.int32), pser.astype(np.int32))
self.assert_eq(kser.astype(bool), pser.astype(bool))
pser = pd.Series([10, 20, 15, 30, 45, None, np.nan], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.astype(bool), pser.astype(bool))
pser = pd.Series(["hi", "hi ", " ", " \t", "", None], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.astype(bool), pser.astype(bool))
# TODO: restore after pandas 1.1.4 is released.
# self.assert_eq(kser.astype(str).tolist(), pser.astype(str).tolist())
self.assert_eq(kser.astype(str).tolist(), ["hi", "hi ", " ", " \t", "", "None"])
self.assert_eq(kser.str.strip().astype(bool), pser.str.strip().astype(bool))
pser = pd.Series([True, False, None], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.astype(bool), pser.astype(bool))
with self.assertRaisesRegex(TypeError, "not understood"):
kser.astype("int63")
def test_aggregate(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
msg = "func must be a string or list of strings"
with self.assertRaisesRegex(ValueError, msg):
kser.aggregate({"x": ["min", "max"]})
msg = (
"If the given function is a list, it " "should only contains function names as strings."
)
with self.assertRaisesRegex(ValueError, msg):
kser.aggregate(["min", max])
def test_drop(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.drop(1), pser.drop(1))
self.assert_eq(kser.drop([1, 4]), pser.drop([1, 4]))
msg = "Need to specify at least one of 'labels' or 'index'"
with self.assertRaisesRegex(ValueError, msg):
kser.drop()
self.assertRaises(KeyError, lambda: kser.drop((0, 1)))
# For MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.drop("lama"), pser.drop("lama"))
self.assert_eq(kser.drop(labels="weight", level=1), pser.drop(labels="weight", level=1))
self.assert_eq(kser.drop(("lama", "weight")), pser.drop(("lama", "weight")))
self.assert_eq(
kser.drop([("lama", "speed"), ("falcon", "weight")]),
pser.drop([("lama", "speed"), ("falcon", "weight")]),
)
self.assert_eq(kser.drop({"lama": "speed"}), pser.drop({"lama": "speed"}))
msg = "'level' should be less than the number of indexes"
with self.assertRaisesRegex(ValueError, msg):
kser.drop(labels="weight", level=2)
msg = (
"If the given index is a list, it "
"should only contains names as all tuples or all non tuples "
"that contain index names"
)
with self.assertRaisesRegex(ValueError, msg):
kser.drop(["lama", ["cow", "falcon"]])
msg = "Cannot specify both 'labels' and 'index'"
with self.assertRaisesRegex(ValueError, msg):
kser.drop("lama", index="cow")
msg = r"'Key length \(2\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
kser.drop(("lama", "speed", "x"))
def test_pop(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pdf = pd.DataFrame({"x": [45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3]}, index=midx)
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.pop(("lama", "speed")), pser.pop(("lama", "speed")))
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
msg = r"'Key length \(3\) exceeds index depth \(2\)'"
with self.assertRaisesRegex(KeyError, msg):
kser.pop(("lama", "speed", "x"))
def test_replace(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
self.assert_eq(kser.replace(), pser.replace())
self.assert_eq(kser.replace({}), pser.replace({}))
msg = "'to_replace' should be one of str, list, dict, int, float"
with self.assertRaisesRegex(ValueError, msg):
kser.replace(ks.range(5))
msg = "Replacement lists must match in length. Expecting 3 got 2"
with self.assertRaisesRegex(ValueError, msg):
kser.replace([10, 20, 30], [1, 2])
msg = "replace currently not support for regex"
with self.assertRaisesRegex(NotImplementedError, msg):
kser.replace(r"^1.$", regex=True)
def test_xs(self):
midx = pd.MultiIndex(
[["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.xs(("a", "lama", "speed")), pser.xs(("a", "lama", "speed")))
def test_duplicates(self):
psers = {
"test on texts": pd.Series(
["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal"
),
"test on numbers": pd.Series([1, 1, 2, 4, 3]),
}
keeps = ["first", "last", False]
for (msg, pser), keep in product(psers.items(), keeps):
with self.subTest(msg, keep=keep):
kser = ks.Series(pser)
self.assert_eq(
pser.drop_duplicates(keep=keep).sort_values(),
kser.drop_duplicates(keep=keep).sort_values(),
)
def test_update(self):
pser = pd.Series([10, 20, 15, 30, 45], name="x")
kser = ks.Series(pser)
msg = "'other' must be a Series"
with self.assertRaisesRegex(ValueError, msg):
kser.update(10)
def test_where(self):
pser1 = pd.Series([0, 1, 2, 3, 4])
kser1 = ks.from_pandas(pser1)
self.assert_eq(pser1.where(pser1 > 3), kser1.where(kser1 > 3).sort_index())
def test_mask(self):
pser1 = pd.Series([0, 1, 2, 3, 4])
kser1 = ks.from_pandas(pser1)
self.assert_eq(pser1.mask(pser1 > 3), kser1.mask(kser1 > 3).sort_index())
def test_truncate(self):
pser1 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
kser1 = ks.Series(pser1)
pser2 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[7, 6, 5, 4, 3, 2, 1])
kser2 = ks.Series(pser2)
self.assert_eq(kser1.truncate(), pser1.truncate())
self.assert_eq(kser1.truncate(before=2), pser1.truncate(before=2))
self.assert_eq(kser1.truncate(after=5), pser1.truncate(after=5))
self.assert_eq(kser1.truncate(copy=False), pser1.truncate(copy=False))
self.assert_eq(kser1.truncate(2, 5, copy=False), pser1.truncate(2, 5, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(kser2.truncate(4, 6), pser2.truncate(4, 6))
self.assert_eq(kser2.truncate(4, 6, copy=False), pser2.truncate(4, 6, copy=False))
else:
expected_kser = ks.Series([20, 30, 40], index=[6, 5, 4])
self.assert_eq(kser2.truncate(4, 6), expected_kser)
self.assert_eq(kser2.truncate(4, 6, copy=False), expected_kser)
kser = ks.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 3, 2, 1])
msg = "truncate requires a sorted index"
with self.assertRaisesRegex(ValueError, msg):
kser.truncate()
kser = ks.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7])
msg = "Truncate: 2 must be after 5"
with self.assertRaisesRegex(ValueError, msg):
kser.truncate(5, 2)
def test_getitem(self):
pser = pd.Series([10, 20, 15, 30, 45], ["A", "A", "B", "C", "D"])
kser = ks.Series(pser)
self.assert_eq(kser["A"], pser["A"])
self.assert_eq(kser["B"], pser["B"])
self.assert_eq(kser[kser > 15], pser[pser > 15])
# for MultiIndex
midx = pd.MultiIndex(
[["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], name="0", index=midx)
kser = ks.Series(pser)
self.assert_eq(kser["a"], pser["a"])
self.assert_eq(kser["a", "lama"], pser["a", "lama"])
self.assert_eq(kser[kser > 1.5], pser[pser > 1.5])
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
kser[("a", "lama", "speed", "x")]
def test_keys(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.keys(), pser.keys())
def test_index(self):
        # to check that the name of the Index is set properly.
idx = pd.Index([1, 2, 3, 4, 5, 6, 7, 8, 9])
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=idx)
kser = ks.from_pandas(pser)
kser.name = "koalas"
pser.name = "koalas"
self.assert_eq(kser.index.name, pser.index.name)
        # to check that the names of a MultiIndex are set properly.
kser.names = ["hello", "koalas"]
pser.names = ["hello", "koalas"]
self.assert_eq(kser.index.names, pser.index.names)
def test_pct_change(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
kser = ks.from_pandas(pser)
self.assert_eq(kser.pct_change(), pser.pct_change(), check_exact=False)
self.assert_eq(kser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False)
self.assert_eq(kser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False)
self.assert_eq(kser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000))
self.assert_eq(kser.pct_change(periods=100000000), pser.pct_change(periods=100000000))
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.pct_change(), pser.pct_change(), check_exact=False)
self.assert_eq(kser.pct_change(periods=2), pser.pct_change(periods=2), check_exact=False)
self.assert_eq(kser.pct_change(periods=-1), pser.pct_change(periods=-1), check_exact=False)
self.assert_eq(kser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000))
self.assert_eq(kser.pct_change(periods=100000000), pser.pct_change(periods=100000000))
def test_axes(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
kser = ks.from_pandas(pser)
self.assert_eq(kser.axes, pser.axes)
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.axes, pser.axes)
def test_combine_first(self):
pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
with self.assertRaisesRegex(
ValueError, "`combine_first` only allows `Series` for parameter `other`"
):
kser1.combine_first(50)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# MultiIndex
midx1 = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
midx2 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser1 = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1)
pser2 = pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# Series come from same DataFrame
pdf = pd.DataFrame(
{
"A": {"falcon": 330.0, "eagle": 160.0},
"B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0},
}
)
pser1 = pdf.A
pser2 = pdf.B
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
def test_udt(self):
sparse_values = {0: 0.1, 1: 1.1}
sparse_vector = SparseVector(len(sparse_values), sparse_values)
pser = pd.Series([sparse_vector])
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
kser = ks.from_pandas(pser)
self.assert_eq(kser, pser)
else:
kser = ks.from_pandas(pser)
self.assert_eq(kser, pser)
def test_repeat(self):
pser = pd.Series(["a", "b", "c"], name="0", index=np.random.rand(3))
kser = ks.from_pandas(pser)
self.assert_eq(kser.repeat(3).sort_index(), pser.repeat(3).sort_index())
self.assert_eq(kser.repeat(0).sort_index(), pser.repeat(0).sort_index())
self.assertRaises(ValueError, lambda: kser.repeat(-1))
self.assertRaises(ValueError, lambda: kser.repeat("abc"))
pdf = pd.DataFrame({"a": ["a", "b", "c"], "rep": [10, 20, 30]}, index=np.random.rand(3))
kdf = ks.from_pandas(pdf)
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
self.assertRaises(ValueError, lambda: kdf.a.repeat(kdf.rep))
else:
self.assert_eq(kdf.a.repeat(kdf.rep).sort_index(), pdf.a.repeat(pdf.rep).sort_index())
def test_take(self):
pser = pd.Series([100, 200, 300, 400, 500], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(kser.take([0, 2, 4]).sort_values(), pser.take([0, 2, 4]).sort_values())
self.assert_eq(
kser.take(range(0, 5, 2)).sort_values(), pser.take(range(0, 5, 2)).sort_values()
)
self.assert_eq(kser.take([-4, -2, 0]).sort_values(), pser.take([-4, -2, 0]).sort_values())
self.assert_eq(
kser.take(range(-2, 1, 2)).sort_values(), pser.take(range(-2, 1, 2)).sort_values()
)
# Checking the type of indices.
self.assertRaises(ValueError, lambda: kser.take(1))
self.assertRaises(ValueError, lambda: kser.take("1"))
self.assertRaises(ValueError, lambda: kser.take({1, 2}))
self.assertRaises(ValueError, lambda: kser.take({1: None, 2: None}))
def test_divmod(self):
pser = pd.Series([100, None, 300, None, 500], name="Koalas")
kser = ks.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
kdiv, kmod = kser.divmod(-100)
pdiv, pmod = pser.divmod(-100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
kdiv, kmod = kser.divmod(100)
pdiv, pmod = pser.divmod(100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
kdiv, kmod = kser.divmod(-100)
pdiv, pmod = pser.floordiv(-100), pser.mod(-100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
kdiv, kmod = kser.divmod(100)
pdiv, pmod = pser.floordiv(100), pser.mod(100)
self.assert_eq(kdiv, pdiv)
self.assert_eq(kmod, pmod)
def test_rdivmod(self):
pser = pd.Series([100, None, 300, None, 500])
kser = ks.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
krdiv, krmod = kser.rdivmod(-100)
prdiv, prmod = pser.rdivmod(-100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
krdiv, krmod = kser.rdivmod(100)
prdiv, prmod = pser.rdivmod(100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
krdiv, krmod = kser.rdivmod(-100)
prdiv, prmod = pser.rfloordiv(-100), pser.rmod(-100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
krdiv, krmod = kser.rdivmod(100)
prdiv, prmod = pser.rfloordiv(100), pser.rmod(100)
self.assert_eq(krdiv, prdiv)
self.assert_eq(krmod, prmod)
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(kser.mod(-150), pser.mod(-150))
self.assert_eq(kser.mod(0), pser.mod(0))
self.assert_eq(kser.mod(150), pser.mod(150))
pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.a.mod(kdf.b), pdf.a.mod(pdf.b))
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(kser.rmod(-150), pser.rmod(-150))
self.assert_eq(kser.rmod(0), pser.rmod(0))
self.assert_eq(kser.rmod(150), pser.rmod(150))
pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6})
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.a.rmod(kdf.b), pdf.a.rmod(pdf.b))
def test_asof(self):
pser = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(kser.asof(20), pser.asof(20))
self.assert_eq(kser.asof([5, 20]).sort_index(), pser.asof([5, 20]).sort_index())
self.assert_eq(kser.asof(100), pser.asof(100))
self.assert_eq(repr(kser.asof(-100)), repr(pser.asof(-100)))
self.assert_eq(kser.asof([-100, 100]).sort_index(), pser.asof([-100, 100]).sort_index())
# where cannot be an Index, Series or a DataFrame
self.assertRaises(ValueError, lambda: kser.asof(ks.Index([-100, 100])))
self.assertRaises(ValueError, lambda: kser.asof(ks.Series([-100, 100])))
self.assertRaises(ValueError, lambda: kser.asof(ks.DataFrame({"A": [1, 2, 3]})))
# asof is not supported for a MultiIndex
pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("y", "d")])
kser = ks.from_pandas(pser)
self.assertRaises(ValueError, lambda: kser.asof(20))
        # asof requires a sorted index (more precisely, a monotonically increasing index)
kser = ks.Series([1, 2, np.nan, 4], index=[10, 30, 20, 40], name="Koalas")
self.assertRaises(ValueError, lambda: kser.asof(20))
kser = ks.Series([1, 2, np.nan, 4], index=[40, 30, 20, 10], name="Koalas")
self.assertRaises(ValueError, lambda: kser.asof(20))
def test_squeeze(self):
# Single value
pser = pd.Series([90])
kser = ks.from_pandas(pser)
self.assert_eq(kser.squeeze(), pser.squeeze())
# Single value with MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "b", "c")])
pser = pd.Series([90], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.squeeze(), pser.squeeze())
# Multiple values
pser = pd.Series([90, 91, 85])
kser = ks.from_pandas(pser)
self.assert_eq(kser.squeeze(), pser.squeeze())
# Multiple values with MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series([90, 91, 85], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(kser.squeeze(), pser.squeeze())
def test_div_zero_and_nan(self):
pser = pd.Series([100, None, -300, None, 500, -700, np.inf, -np.inf], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(pser.div(0), kser.div(0))
self.assert_eq(pser.truediv(0), kser.truediv(0))
self.assert_eq(pser / 0, kser / 0)
self.assert_eq(pser.div(np.nan), kser.div(np.nan))
self.assert_eq(pser.truediv(np.nan), kser.truediv(np.nan))
self.assert_eq(pser / np.nan, kser / np.nan)
        # floordiv behaves differently in pandas >= 1.0.0 when dividing by 0
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(pser.floordiv(0), kser.floordiv(0))
self.assert_eq(pser // 0, kser // 0)
else:
result = pd.Series(
[np.inf, np.nan, -np.inf, np.nan, np.inf, -np.inf, np.inf, -np.inf], name="Koalas"
)
self.assert_eq(kser.floordiv(0), result)
self.assert_eq(kser // 0, result)
self.assert_eq(pser.floordiv(np.nan), kser.floordiv(np.nan))
def test_mad(self):
pser = pd.Series([1, 2, 3, 4], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(pser.mad(), kser.mad())
pser = pd.Series([None, -2, 5, 10, 50, np.nan, -20], name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(pser.mad(), kser.mad())
pmidx = pd.MultiIndex.from_tuples(
[("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
)
pser = pd.Series([1, 2, 3, 4, 5], name="Koalas")
pser.index = pmidx
kser = ks.from_pandas(pser)
self.assert_eq(pser.mad(), kser.mad())
pmidx = pd.MultiIndex.from_tuples(
[("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")]
)
pser = pd.Series([None, -2, 5, 50, np.nan], name="Koalas")
pser.index = pmidx
kser = ks.from_pandas(pser)
self.assert_eq(pser.mad(), kser.mad())
def test_to_frame(self):
pser = pd.Series(["a", "b", "c"])
kser = ks.from_pandas(pser)
self.assert_eq(pser.to_frame(name="a"), kser.to_frame(name="a"))
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series(["a", "b", "c"], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(pser.to_frame(name="a"), kser.to_frame(name="a"))
def test_shape(self):
pser = pd.Series(["a", "b", "c"])
kser = ks.from_pandas(pser)
self.assert_eq(pser.shape, kser.shape)
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
pser = pd.Series(["a", "b", "c"], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(pser.shape, kser.shape)
def test_to_markdown(self):
pser = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")
kser = ks.from_pandas(pser)
        # `to_markdown()` is only supported in pandas >= 1.0.0, since it was newly added there.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assertRaises(NotImplementedError, lambda: kser.to_markdown())
else:
self.assert_eq(pser.to_markdown(), kser.to_markdown())
def test_unstack(self):
pser = pd.Series(
[10, -2, 4, 7],
index=pd.MultiIndex.from_tuples(
[("one", "a", "z"), ("one", "b", "x"), ("two", "a", "c"), ("two", "b", "v")],
names=["A", "B", "C"],
),
)
kser = ks.from_pandas(pser)
levels = [-3, -2, -1, 0, 1, 2]
for level in levels:
pandas_result = pser.unstack(level=level)
koalas_result = kser.unstack(level=level).sort_index()
self.assert_eq(pandas_result, koalas_result)
self.assert_eq(pandas_result.index.names, koalas_result.index.names)
self.assert_eq(pandas_result.columns.names, koalas_result.columns.names)
# non-numeric datatypes
pser = pd.Series(
list("abcd"), index=pd.MultiIndex.from_product([["one", "two"], ["a", "b"]])
)
kser = ks.from_pandas(pser)
levels = [-2, -1, 0, 1]
for level in levels:
pandas_result = pser.unstack(level=level)
koalas_result = kser.unstack(level=level).sort_index()
self.assert_eq(pandas_result, koalas_result)
self.assert_eq(pandas_result.index.names, koalas_result.index.names)
self.assert_eq(pandas_result.columns.names, koalas_result.columns.names)
# Exceeding the range of level
self.assertRaises(IndexError, lambda: kser.unstack(level=3))
self.assertRaises(IndexError, lambda: kser.unstack(level=-4))
# Only support for MultiIndex
kser = ks.Series([10, -2, 4, 7])
self.assertRaises(ValueError, lambda: kser.unstack())
def test_item(self):
kser = ks.Series([10, 20])
self.assertRaises(ValueError, lambda: kser.item())
def test_filter(self):
pser = pd.Series([0, 1, 2], index=["one", "two", "three"])
kser = ks.from_pandas(pser)
self.assert_eq(pser.filter(items=["one", "three"]), kser.filter(items=["one", "three"]))
self.assert_eq(pser.filter(regex="e$"), kser.filter(regex="e$"))
self.assert_eq(pser.filter(like="hre"), kser.filter(like="hre"))
with self.assertRaisesRegex(ValueError, "Series does not support columns axis."):
kser.filter(like="hre", axis=1)
# for MultiIndex
midx = pd.MultiIndex.from_tuples([("one", "x"), ("two", "y"), ("three", "z")])
pser = pd.Series([0, 1, 2], index=midx)
kser = ks.from_pandas(pser)
self.assert_eq(
pser.filter(items=[("one", "x"), ("three", "z")]),
kser.filter(items=[("one", "x"), ("three", "z")]),
)
with self.assertRaisesRegex(TypeError, "Unsupported type list"):
kser.filter(items=[["one", "x"], ("three", "z")])
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
kser.filter(items=[(), ("three", "z")])
def test_abs(self):
pser = pd.Series([-2, -1, 0, 1])
kser = ks.from_pandas(pser)
self.assert_eq(abs(kser), abs(pser))
self.assert_eq(np.abs(kser), np.abs(pser))
def test_bfill(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.bfill(), pser.bfill())
self.assert_eq(kser.bfill()[0], pser.bfill()[0])
kser.bfill(inplace=True)
pser.bfill(inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kser[0], pser[0])
self.assert_eq(kdf, pdf)
def test_ffill(self):
pdf = pd.DataFrame({"x": [np.nan, 2, 3, 4, np.nan, 6], "y": [np.nan, 2, 3, 4, np.nan, 6]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
self.assert_eq(kser.ffill(), pser.ffill())
self.assert_eq(kser.ffill()[4], pser.ffill()[4])
kser.ffill(inplace=True)
pser.ffill(inplace=True)
self.assert_eq(kser, pser)
self.assert_eq(kser[4], pser[4])
self.assert_eq(kdf, pdf)
def test_iteritems(self):
pser = pd.Series(["A", "B", "C"])
kser = ks.from_pandas(pser)
for (p_name, p_items), (k_name, k_items) in zip(pser.iteritems(), kser.iteritems()):
self.assert_eq(p_name, k_name)
self.assert_eq(p_items, k_items)
def test_droplevel(self):
# droplevel is new in pandas 0.24.0
if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"):
pser = pd.Series(
[1, 2, 3],
index=pd.MultiIndex.from_tuples(
[("x", "a", "q"), ("x", "b", "w"), ("y", "c", "e")],
names=["level_1", "level_2", "level_3"],
),
)
kser = ks.from_pandas(pser)
self.assert_eq(pser.droplevel(0), kser.droplevel(0))
self.assert_eq(pser.droplevel("level_1"), kser.droplevel("level_1"))
self.assert_eq(pser.droplevel(-1), kser.droplevel(-1))
self.assert_eq(pser.droplevel([0]), kser.droplevel([0]))
self.assert_eq(pser.droplevel(["level_1"]), kser.droplevel(["level_1"]))
self.assert_eq(pser.droplevel((0,)), kser.droplevel((0,)))
self.assert_eq(pser.droplevel(("level_1",)), kser.droplevel(("level_1",)))
self.assert_eq(pser.droplevel([0, 2]), kser.droplevel([0, 2]))
self.assert_eq(
pser.droplevel(["level_1", "level_3"]), kser.droplevel(["level_1", "level_3"])
)
self.assert_eq(pser.droplevel((1, 2)), kser.droplevel((1, 2)))
self.assert_eq(
pser.droplevel(("level_2", "level_3")), kser.droplevel(("level_2", "level_3"))
)
with self.assertRaisesRegex(KeyError, "Level {0, 1, 2} not found"):
kser.droplevel({0, 1, 2})
with self.assertRaisesRegex(KeyError, "Level level_100 not found"):
kser.droplevel(["level_1", "level_100"])
with self.assertRaisesRegex(
IndexError, "Too many levels: Index has only 3 levels, not 11"
):
kser.droplevel(10)
with self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 3 levels, -10 is not a valid level number",
):
kser.droplevel(-10)
with self.assertRaisesRegex(
ValueError,
"Cannot remove 3 levels from an index with 3 levels: "
"at least one level must be left.",
):
kser.droplevel([0, 1, 2])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 5 levels from an index with 3 levels: "
"at least one level must be left.",
):
kser.droplevel([1, 1, 1, 1, 1])
# Tupled names
pser.index.names = [("a", "1"), ("b", "2"), ("c", "3")]
kser = ks.from_pandas(pser)
self.assert_eq(
pser.droplevel([("a", "1"), ("c", "3")]), kser.droplevel([("a", "1"), ("c", "3")])
)
@unittest.skipIf(
LooseVersion(pyspark.__version__) < LooseVersion("3.0"),
"tail won't work properly with PySpark<3.0",
)
def test_tail(self):
pser = pd.Series(range(1000), name="Koalas")
kser = ks.from_pandas(pser)
self.assert_eq(pser.tail(), kser.tail())
self.assert_eq(pser.tail(10), kser.tail(10))
self.assert_eq(pser.tail(-990), kser.tail(-990))
self.assert_eq(pser.tail(0), kser.tail(0))
self.assert_eq(pser.tail(1001), kser.tail(1001))
self.assert_eq(pser.tail(-1001), kser.tail(-1001))
with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
kser.tail("10")
def test_product(self):
pser = pd.Series([10, 20, 30, 40, 50])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
# Containing NA values
pser = pd.Series([10, np.nan, 30, np.nan, 50])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod(), almost=True)
# All-NA values
pser = pd.Series([np.nan, np.nan, np.nan])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
# Empty Series
pser = pd.Series([])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
# Boolean Series
pser = pd.Series([True, True, True])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
pser = pd.Series([False, False, False])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
pser = pd.Series([True, False, True])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(), kser.prod())
# With `min_count` parameter
pser = pd.Series([10, 20, 30, 40, 50])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(min_count=5), kser.prod(min_count=5))
        # Using `repr` since the result below will be `np.nan`.
self.assert_eq(repr(pser.prod(min_count=6)), repr(kser.prod(min_count=6)))
pser = pd.Series([10, np.nan, 30, np.nan, 50])
kser = ks.from_pandas(pser)
self.assert_eq(pser.prod(min_count=3), kser.prod(min_count=3), almost=True)
# ditto.
self.assert_eq(repr(pser.prod(min_count=4)), repr(kser.prod(min_count=4)))
pser = pd.Series([np.nan, np.nan, np.nan])
kser = ks.from_pandas(pser)
# ditto.
self.assert_eq(repr(pser.prod(min_count=1)), repr(kser.prod(min_count=1)))
pser = pd.Series([])
kser = ks.from_pandas(pser)
# ditto.
self.assert_eq(repr(pser.prod(min_count=1)), repr(kser.prod(min_count=1)))
with self.assertRaisesRegex(TypeError, "cannot perform prod with type object"):
ks.Series(["a", "b", "c"]).prod()
with self.assertRaisesRegex(TypeError, "cannot perform prod with type datetime64"):
ks.Series([pd.Timestamp("2016-01-01") for _ in range(3)]).prod()
def test_hasnans(self):
# BooleanType
pser = pd.Series([True, False, True, True])
kser = ks.from_pandas(pser)
self.assert_eq(pser.hasnans, kser.hasnans)
pser = pd.Series([True, False, np.nan, True])
kser = ks.from_pandas(pser)
self.assert_eq(pser.hasnans, kser.hasnans)
# TimestampType
pser = pd.Series([pd.Timestamp("2020-07-30") for _ in range(3)])
kser = ks.from_pandas(pser)
self.assert_eq(pser.hasnans, kser.hasnans)
pser = pd.Series([pd.Timestamp("2020-07-30"), np.nan, pd.Timestamp("2020-07-30")])
kser = ks.from_pandas(pser)
self.assert_eq(pser.hasnans, kser.hasnans)
def test_last_valid_index(self):
# `pyspark.sql.dataframe.DataFrame.tail` is new in pyspark >= 3.0.
if LooseVersion(pyspark.__version__) >= LooseVersion("3.0"):
pser = pd.Series([250, 1.5, 320, 1, 0.3, None, None, None, None])
kser = ks.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), kser.last_valid_index())
# MultiIndex columns
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser.index = midx
kser = ks.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), kser.last_valid_index())
# Empty Series
pser = pd.Series([])
kser = ks.from_pandas(pser)
self.assert_eq(pser.last_valid_index(), kser.last_valid_index())
def test_first_valid_index(self):
# Empty Series
pser = pd.Series([])
kser = ks.from_pandas(pser)
self.assert_eq(pser.first_valid_index(), kser.first_valid_index())
def test_pad(self):
pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x")
kser = ks.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pser.pad(), kser.pad())
# Test `inplace=True`
pser.pad(inplace=True)
kser.pad(inplace=True)
self.assert_eq(pser, kser)
else:
expected = ks.Series([np.nan, 2, 3, 4, 4, 6], name="x")
self.assert_eq(expected, kser.pad())
# Test `inplace=True`
kser.pad(inplace=True)
self.assert_eq(expected, kser)
def test_explode(self):
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
kser = ks.from_pandas(pser)
self.assert_eq(pser.explode(), kser.explode(), almost=True)
# MultiIndex
pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
kser = ks.from_pandas(pser)
self.assert_eq(pser.explode(), kser.explode(), almost=True)
# non-array type Series
pser = pd.Series([1, 2, 3, 4])
kser = ks.from_pandas(pser)
self.assert_eq(pser.explode(), kser.explode())
else:
pser = pd.Series([[1, 2, 3], [], None, [3, 4]])
kser = ks.from_pandas(pser)
expected = pd.Series([1.0, 2.0, 3.0, None, None, 3.0, 4.0], index=[0, 0, 0, 1, 2, 3, 3])
self.assert_eq(kser.explode(), expected)
# MultiIndex
pser.index = pd.MultiIndex.from_tuples([("a", "w"), ("b", "x"), ("c", "y"), ("d", "z")])
kser = ks.from_pandas(pser)
expected = pd.Series(
[1.0, 2.0, 3.0, None, None, 3.0, 4.0],
index=pd.MultiIndex.from_tuples(
[
("a", "w"),
("a", "w"),
("a", "w"),
("b", "x"),
("c", "y"),
("d", "z"),
("d", "z"),
]
),
)
self.assert_eq(kser.explode(), expected)
# non-array type Series
pser = pd.Series([1, 2, 3, 4])
kser = ks.from_pandas(pser)
expected = pser
self.assert_eq(kser.explode(), expected)
def test_argsort(self):
# Without null values
pser = pd.Series([0, -100, 50, 100, 20], index=["A", "B", "C", "D", "E"])
kser = ks.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), kser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-kser).argsort().sort_index())
# MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")]
)
kser = ks.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), kser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-kser).argsort().sort_index())
# With name
pser.name = "Koalas"
kser = ks.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), kser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-kser).argsort().sort_index())
# Series from Index
pidx = pd.Index([4.0, -6.0, 2.0, -100.0, 11.0, 20.0, 1.0, -99.0])
kidx = ks.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), kidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-kidx.to_series()).argsort().sort_index()
)
# Series from Index with name
pidx.name = "Koalas"
kidx = ks.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), kidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-kidx.to_series()).argsort().sort_index()
)
# Series from DataFrame
pdf = pd.DataFrame({"A": [4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]})
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf.A.argsort().sort_index(), kdf.A.argsort().sort_index())
self.assert_eq((-pdf.A).argsort().sort_index(), (-kdf.A).argsort().sort_index())
# With null values
pser = pd.Series([0, -100, np.nan, 100, np.nan], index=["A", "B", "C", "D", "E"])
kser = ks.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), kser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-kser).argsort().sort_index())
# MultiIndex with null values
pser.index = pd.MultiIndex.from_tuples(
[("a", "v"), ("b", "w"), ("c", "x"), ("d", "y"), ("e", "z")]
)
kser = ks.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), kser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-kser).argsort().sort_index())
# With name with null values
pser.name = "Koalas"
kser = ks.from_pandas(pser)
self.assert_eq(pser.argsort().sort_index(), kser.argsort().sort_index())
self.assert_eq((-pser).argsort().sort_index(), (-kser).argsort().sort_index())
# Series from Index with null values
pidx = pd.Index([4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0])
kidx = ks.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), kidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-kidx.to_series()).argsort().sort_index()
)
# Series from Index with name with null values
pidx.name = "Koalas"
kidx = ks.from_pandas(pidx)
self.assert_eq(
pidx.to_series().argsort().sort_index(), kidx.to_series().argsort().sort_index()
)
self.assert_eq(
(-pidx.to_series()).argsort().sort_index(), (-kidx.to_series()).argsort().sort_index()
)
# Series from DataFrame with null values
pdf = pd.DataFrame({"A": [4.0, -6.0, 2.0, np.nan, -100.0, 11.0, 20.0, np.nan, 1.0, -99.0]})
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf.A.argsort().sort_index(), kdf.A.argsort().sort_index())
self.assert_eq((-pdf.A).argsort().sort_index(), (-kdf.A).argsort().sort_index())
def test_argmin_argmax(self):
pser = pd.Series(
{
"Corn Flakes": 100.0,
"Almond Delight": 110.0,
"Cinnamon Toast Crunch": 120.0,
"Cocoa Puff": 110.0,
"Expensive Flakes": 120.0,
"Cheap Flakes": 100.0,
},
name="Koalas",
)
kser = ks.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(pser.argmin(), kser.argmin())
self.assert_eq(pser.argmax(), kser.argmax())
# MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("a", "t"), ("b", "u"), ("c", "v"), ("d", "w"), ("e", "x"), ("f", "u")]
)
kser = ks.from_pandas(pser)
self.assert_eq(pser.argmin(), kser.argmin())
self.assert_eq(pser.argmax(), kser.argmax())
# Null Series
self.assert_eq(pd.Series([np.nan]).argmin(), ks.Series([np.nan]).argmin())
self.assert_eq(pd.Series([np.nan]).argmax(), ks.Series([np.nan]).argmax())
else:
self.assert_eq(pser.values.argmin(), kser.argmin())
self.assert_eq(pser.values.argmax(), kser.argmax())
# MultiIndex
pser.index = pd.MultiIndex.from_tuples(
[("a", "t"), ("b", "u"), ("c", "v"), ("d", "w"), ("e", "x"), ("f", "u")]
)
kser = ks.from_pandas(pser)
self.assert_eq(pser.values.argmin(), kser.argmin())
self.assert_eq(pser.values.argmax(), kser.argmax())
# Null Series
self.assert_eq(-1, ks.Series([np.nan]).argmin())
self.assert_eq(-1, ks.Series([np.nan]).argmax())
with self.assertRaisesRegex(ValueError, "attempt to get argmin of an empty sequence"):
ks.Series([]).argmin()
with self.assertRaisesRegex(ValueError, "attempt to get argmax of an empty sequence"):
ks.Series([]).argmax()
def test_backfill(self):
pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x")
kser = ks.from_pandas(pser)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pser.backfill(), kser.backfill())
# Test `inplace=True`
pser.backfill(inplace=True)
kser.backfill(inplace=True)
self.assert_eq(pser, kser)
else:
expected = ks.Series([2.0, 2.0, 3.0, 4.0, 6.0, 6.0], name="x")
self.assert_eq(expected, kser.backfill())
# Test `inplace=True`
kser.backfill(inplace=True)
self.assert_eq(expected, kser)
def test_compare(self):
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
pser1 = pd.Series(["b", "c", np.nan, "g", np.nan])
pser2 = pd.Series(["a", "c", np.nan, np.nan, "h"])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
pser1.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
pser2.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
else:
kser1 = ks.Series(["b", "c", np.nan, "g", np.nan])
kser2 = ks.Series(["a", "c", np.nan, np.nan, "h"])
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
kser1 = ks.Series(
["b", "c", np.nan, "g", np.nan],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
["a", "c", np.nan, np.nan, "h"],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# Different Index
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series([1, 2, 3, 4, 5], index=pd.Index([1, 2, 3, 4, 5]),)
kser2 = ks.Series([2, 2, 3, 4, 1], index=pd.Index([5, 4, 3, 2, 1]),)
kser1.compare(kser2)
# Different MultiIndex
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series(
[1, 2, 3, 4, 5],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
[2, 2, 3, 4, 1],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "a"), ("x", "k"), ("q", "l")]
),
)
kser1.compare(kser2)
| 1 | 16,840 | Shall we also add `ks.Series([1, 2, 3], name=["0", "1"])`? | databricks-koalas | py |
@@ -45,7 +45,7 @@ public class PermissioningJsonRpcRequestFactory {
Request<?, AddNodeResponse> addNodesToWhitelist(final List<URI> enodeList) {
return new Request<>(
- "perm_addNodesToWhitelist",
+ "perm_addNodesToAllowlist",
Collections.singletonList(enodeList),
web3jService,
AddNodeResponse.class); | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.tests.acceptance.dsl.transaction.perm;
import java.net.URI;
import java.util.Collections;
import java.util.List;
import org.assertj.core.util.Lists;
import org.web3j.protocol.Web3jService;
import org.web3j.protocol.core.Request;
import org.web3j.protocol.core.Response;
public class PermissioningJsonRpcRequestFactory {
public static class AddAccountsToWhitelistResponse extends Response<String> {}
public static class RemoveAccountsFromWhitelistResponse extends Response<String> {}
public static class GetAccountsWhitelistResponse extends Response<List<String>> {}
public static class AddNodeResponse extends Response<String> {}
public static class RemoveNodeResponse extends Response<String> {}
public static class GetNodesWhitelistResponse extends Response<List<String>> {}
private final Web3jService web3jService;
public PermissioningJsonRpcRequestFactory(final Web3jService web3jService) {
this.web3jService = web3jService;
}
Request<?, AddNodeResponse> addNodesToWhitelist(final List<URI> enodeList) {
return new Request<>(
"perm_addNodesToWhitelist",
Collections.singletonList(enodeList),
web3jService,
AddNodeResponse.class);
}
Request<?, RemoveNodeResponse> removeNodesFromWhitelist(final List<URI> enodeList) {
return new Request<>(
"perm_removeNodesFromWhitelist",
Collections.singletonList(enodeList),
web3jService,
RemoveNodeResponse.class);
}
Request<?, GetNodesWhitelistResponse> getNodesWhitelist() {
return new Request<>(
"perm_getNodesWhitelist", Lists.emptyList(), web3jService, GetNodesWhitelistResponse.class);
}
Request<?, GetAccountsWhitelistResponse> getAccountsWhitelist() {
return new Request<>(
"perm_getAccountsWhitelist", null, web3jService, GetAccountsWhitelistResponse.class);
}
Request<?, AddAccountsToWhitelistResponse> addAccountsToWhitelist(final List<String> accounts) {
return new Request<>(
"perm_addAccountsToWhitelist",
Collections.singletonList(accounts),
web3jService,
AddAccountsToWhitelistResponse.class);
}
Request<?, RemoveAccountsFromWhitelistResponse> removeAccountsFromWhitelist(
final List<String> accounts) {
return new Request<>(
"perm_removeAccountsFromWhitelist",
Collections.singletonList(accounts),
web3jService,
RemoveAccountsFromWhitelistResponse.class);
}
}
| 1 | 22,896 | Can this be updated? | hyperledger-besu | java |
@@ -89,14 +89,7 @@ final class AddProviderCompilerPass implements CompilerPassInterface
$definition = $container->getDefinition($id);
foreach ($context['formats'] as $format => $formatConfig) {
- $formatConfig['quality'] = $formatConfig['quality'] ?? 80;
- $formatConfig['format'] = $formatConfig['format'] ?? 'jpg';
- $formatConfig['height'] = $formatConfig['height'] ?? null;
- $formatConfig['constraint'] = $formatConfig['constraint'] ?? true;
- $formatConfig['resizer'] = $formatConfig['resizer'] ?? false;
-
- $formatName = sprintf('%s_%s', $name, $format);
- $definition->addMethodCall('addFormat', [$formatName, $formatConfig]);
+ $definition->addMethodCall('addFormat', [sprintf('%s_%s', $name, $format), $formatConfig]);
}
}
} | 1 | <?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\DependencyInjection\Compiler;
use Sonata\MediaBundle\DependencyInjection\Configuration;
use Sonata\MediaBundle\Provider\MediaProviderInterface;
use Symfony\Component\Config\Definition\Processor;
use Symfony\Component\DependencyInjection\Compiler\CompilerPassInterface;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\Reference;
/**
* @author Thomas Rabaix <[email protected]>
*/
final class AddProviderCompilerPass implements CompilerPassInterface
{
public function process(ContainerBuilder $container): void
{
$config = $this->getExtensionConfig($container);
// define configuration per provider
$this->applyFormats($container, $config);
$this->attachArguments($container, $config);
$this->attachProviders($container);
$format = $container->getParameter('sonata.media.admin_format');
foreach ($container->findTaggedServiceIds('sonata.media.provider') as $id => $attributes) {
$container->getDefinition($id)->addMethodCall(
'addFormat',
[MediaProviderInterface::FORMAT_ADMIN, $format]
);
}
}
private function attachProviders(ContainerBuilder $container): void
{
$pool = $container->getDefinition('sonata.media.pool');
foreach ($container->findTaggedServiceIds('sonata.media.provider') as $id => $attributes) {
$pool->addMethodCall('addProvider', [$id, new Reference($id)]);
}
}
/**
* @param array<string, mixed> $config
*/
private function attachArguments(ContainerBuilder $container, array $config): void
{
foreach ($container->findTaggedServiceIds('sonata.media.provider') as $id => $attributes) {
foreach ($config['providers'] as $provider) {
if ($provider['service'] === $id) {
$definition = $container->getDefinition($id);
$definition
->replaceArgument(1, new Reference($provider['filesystem']))
->replaceArgument(2, new Reference($provider['cdn']))
->replaceArgument(3, new Reference($provider['generator']))
->replaceArgument(4, new Reference($provider['thumbnail']));
if ($provider['resizer']) {
$definition->addMethodCall('setResizer', [new Reference($provider['resizer'])]);
}
}
}
}
}
/**
* Define the default settings to the config array.
*
* @param array<string, mixed> $config
*/
private function applyFormats(ContainerBuilder $container, array $config): void
{
foreach ($config['contexts'] as $name => $context) {
// add the different related formats
foreach ($context['providers'] as $id) {
$definition = $container->getDefinition($id);
foreach ($context['formats'] as $format => $formatConfig) {
$formatConfig['quality'] = $formatConfig['quality'] ?? 80;
$formatConfig['format'] = $formatConfig['format'] ?? 'jpg';
$formatConfig['height'] = $formatConfig['height'] ?? null;
$formatConfig['constraint'] = $formatConfig['constraint'] ?? true;
$formatConfig['resizer'] = $formatConfig['resizer'] ?? false;
$formatName = sprintf('%s_%s', $name, $format);
$definition->addMethodCall('addFormat', [$formatName, $formatConfig]);
}
}
}
}
/**
* @return array<string, mixed>
*/
private function getExtensionConfig(ContainerBuilder $container): array
{
$config = $container->getExtensionConfig('sonata_media');
$config = $container->getParameterBag()->resolveValue($config);
$processor = new Processor();
return $processor->processConfiguration(new Configuration(), $config);
}
}
| 1 | 12,510 | this change is to avoid duplication on the defaults. They are already on the Configuration class. (Also I removed the false default, because the admin format does not have it). This is also produces the rest of the diff, changes from false to null. | sonata-project-SonataMediaBundle | php |
@@ -46,6 +46,8 @@ func (pc *podConfigurator) connectInterfaceToOVSAsync(ifConfig *interfacestore.I
go func() {
klog.Infof("Waiting for interface %s to be created", hostIfAlias)
err := wait.PollImmediate(time.Second, 60*time.Second, func() (bool, error) {
+ containerAccess.lockContainer(containerID)
+ defer containerAccess.unlockContainer(containerID)
curEp, ok := pc.ifConfigurator.getEndpoint(ovsPortName)
if !ok {
return true, fmt.Errorf("failed to find HNSEndpoint %s", ovsPortName) | 1 | // +build windows
// Copyright 2021 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cniserver
import (
"fmt"
"time"
"github.com/containernetworking/cni/pkg/types/current"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/agent/interfacestore"
"github.com/vmware-tanzu/antrea/pkg/agent/util"
)
// connectInterfaceToOVSAsync waits for an interface to be created and connects it to OVS br-int asynchronously
// in another goroutine. The function is used for the ContainerD runtime, where the host interface is only
// created after the CNI call completes.
func (pc *podConfigurator) connectInterfaceToOVSAsync(ifConfig *interfacestore.InterfaceConfig, containerAccess *containerAccessArbitrator) error {
if containerAccess == nil {
return fmt.Errorf("container lock cannot be null")
}
ovsPortName := ifConfig.InterfaceName
expectedEp, ok := pc.ifConfigurator.getEndpoint(ovsPortName)
if !ok {
return fmt.Errorf("failed to find HNSEndpoint %s", ovsPortName)
}
hostIfAlias := fmt.Sprintf("%s (%s)", util.ContainerVNICPrefix, ovsPortName)
containerID := ifConfig.ContainerID
go func() {
klog.Infof("Waiting for interface %s to be created", hostIfAlias)
err := wait.PollImmediate(time.Second, 60*time.Second, func() (bool, error) {
curEp, ok := pc.ifConfigurator.getEndpoint(ovsPortName)
if !ok {
return true, fmt.Errorf("failed to find HNSEndpoint %s", ovsPortName)
}
if curEp.Id != expectedEp.Id {
klog.Warningf("Detected HNSEndpoint change for port %s, exiting current goroutine", ovsPortName)
return true, nil
}
if !util.HostInterfaceExists(hostIfAlias) {
klog.Infof("Waiting for interface %s to be created", hostIfAlias)
return false, nil
}
if err := pc.connectInterfaceToOVSCommon(ovsPortName, ifConfig); err != nil {
return true, fmt.Errorf("failed to connect to OVS for container %s: %v", containerID, err)
}
return true, nil
})
if err != nil {
klog.Errorf("Failed to create OVS port for container %s: %v", containerID, err)
}
}()
return nil
}
// connectInterfaceToOVS connects an existing interface to OVS br-int.
func (pc *podConfigurator) connectInterfaceToOVS(
podName string,
podNameSpace string,
containerID string,
hostIface *current.Interface,
containerIface *current.Interface,
ips []*current.IPConfig,
containerAccess *containerAccessArbitrator,
) (*interfacestore.InterfaceConfig, error) {
// Use the outer veth interface name as the OVS port name.
ovsPortName := hostIface.Name
containerConfig := buildContainerConfig(ovsPortName, containerID, podName, podNameSpace, containerIface, ips)
hostIfAlias := fmt.Sprintf("%s (%s)", util.ContainerVNICPrefix, ovsPortName)
	// - For the ContainerD runtime, the container interface is created only after CNI replies with the network
	//   setup result, so in that case we have to wait asynchronously for the interface to be created.
	// - For the Docker runtime, antrea-agent still creates the OVS port synchronously.
	// - antrea-agent decides how to create the OVS port by checking whether the container interface has already
	//   been created. If the ContainerD runtime ever changes its behavior so that the container interface is
	//   created when the HNSEndpoint/HostComputeEndpoint is attached, the current implementation will still work:
	//   it will simply choose the synchronous way to create the OVS port.
if util.HostInterfaceExists(hostIfAlias) {
return containerConfig, pc.connectInterfaceToOVSCommon(ovsPortName, containerConfig)
} else {
return containerConfig, pc.connectInterfaceToOVSAsync(containerConfig, containerAccess)
}
}
func (pc *podConfigurator) reconcileMissingPods(pods sets.String, containerAccess *containerAccessArbitrator) {
interfacesConfig := pc.ifConfigurator.getInterfaceConfigForPods(pods)
for pod := range pods {
ifaceConfig, ok := interfacesConfig[pod]
if !ok {
klog.Errorf("Failed to reconcile Pod %s: interface config not found", pod)
continue
}
if err := pc.connectInterfaceToOVSAsync(ifaceConfig, containerAccess); err != nil {
klog.Errorf("Failed to reconcile Pod %s: %v", pod, err)
}
}
}
 | 1 | 33,029 | What does it protect from? If it's subsequent CNI calls, won't containerID be different from the first one? Or is it different in containerd? We use `getInfraContainer` to get the lock identity in CNIAdd. And this reminds me: what if the first CNI call and the subsequent ones both run into connectInterfaceToOVSAsync, will duplicate ports be created, or has that been handled? | antrea-io-antrea | go
@@ -0,0 +1,19 @@
+<?php
+
+namespace Shopsys\FrameworkBundle\Model\Advert\Exception;
+
+use Exception;
+
+class AdvertPositionNotKnownException extends Exception implements AdvertException
+{
+ public function __construct(string $positionName, array $knownPositionsNames, Exception $previous = null)
+ {
+ $message = sprintf(
+ 'Unknown advert position name "%s". Known names are %s.',
+ $positionName,
+ implode('", "', $knownPositionsNames)
+ );
+
+ parent::__construct($message, 0, $previous);
+ }
+} | 1 | 1 | 11,390 | In Exceptions' constructors, we always add a optional last parameter `Exception $previous = null`. In the past, this was (still is?) a part of the coding standards. | shopsys-shopsys | php |
|
@@ -90,6 +90,15 @@ const connectionActivityMonitorInterval = time.Minute * 3
// we discard the connection.
const maxPeerInactivityDuration = time.Minute * 5
+// maxMessageQueueDuration is the maximum amount of time a message is allowed to be waiting
+// in the various queues before being sent. Once that deadline has been reached, sending the message
+// is pointless, as it's too stale to be of any value.
+const maxMessageQueueDuration = time.Second * 25
+
+// slowWritingPeerMonitorInterval is the interval at which we peek on the connected peers to
+// verify that their current outgoing message is not being blocked for too long.
+const slowWritingPeerMonitorInterval = time.Second * 5
+
var networkIncomingConnections = metrics.MakeGauge(metrics.NetworkIncomingConnections)
var networkOutgoingConnections = metrics.MakeGauge(metrics.NetworkOutgoingConnections)
| 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package network
import (
"container/heap"
"context"
"encoding/base64"
"errors"
"fmt"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"path"
"regexp"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/algorand/go-deadlock"
"github.com/algorand/websocket"
"github.com/gorilla/mux"
"golang.org/x/net/netutil"
"golang.org/x/sys/unix"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/protocol"
tools_network "github.com/algorand/go-algorand/tools/network"
"github.com/algorand/go-algorand/util/metrics"
)
const incomingThreads = 20
const broadcastThreads = 4
const messageFilterSize = 5000 // messages greater than that size may be blocked by incoming/outgoing filter
// httpServerReadHeaderTimeout is the amount of time allowed to read
// request headers. The connection's read deadline is reset
// after reading the headers and the Handler can decide what
// is considered too slow for the body.
const httpServerReadHeaderTimeout = time.Second * 10
// httpServerWriteTimeout is the maximum duration before timing out
// writes of the response. It is reset whenever a new
// request's header is read.
const httpServerWriteTimeout = time.Second * 60
// httpServerIdleTimeout is the maximum amount of time to wait for the
// next request when keep-alives are enabled. If httpServerIdleTimeout
// is zero, the value of ReadTimeout is used. If both are
// zero, ReadHeaderTimeout is used.
const httpServerIdleTimeout = time.Second * 4
// MaxHeaderBytes controls the maximum number of bytes the
// server will read parsing the request header's keys and
// values, including the request line. It does not limit the
// size of the request body.
const httpServerMaxHeaderBytes = 4096
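// Illustrative sketch: these constants are what ends up on the embedded http.Server; setup()
// below applies them with the equivalent of the following assignments.
//
//	wn.server.ReadHeaderTimeout = httpServerReadHeaderTimeout // 10s to read request headers
//	wn.server.WriteTimeout = httpServerWriteTimeout           // 60s cap on writing a response
//	wn.server.IdleTimeout = httpServerIdleTimeout             // 4s keep-alive idle window
//	wn.server.MaxHeaderBytes = httpServerMaxHeaderBytes       // 4 KiB request header cap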
// MaxInt is the maximum int which might be int32 or int64
const MaxInt = int((^uint(0)) >> 1)
// connectionActivityMonitorInterval is the interval at which we check
// if any of the connected peers have been idle for a long while and
// need to be disconnected.
const connectionActivityMonitorInterval = time.Minute * 3
// maxPeerInactivityDuration is the maximum allowed duration for a
// peer to remain completely idle (i.e. no inbound or outbound communication), before
// we discard the connection.
const maxPeerInactivityDuration = time.Minute * 5
var networkIncomingConnections = metrics.MakeGauge(metrics.NetworkIncomingConnections)
var networkOutgoingConnections = metrics.MakeGauge(metrics.NetworkOutgoingConnections)
var networkIncomingBufferMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_rx_buffer_micros_total", Description: "microseconds spent by incoming messages on the receive buffer"})
var networkHandleMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_rx_handle_micros_total", Description: "microseconds spent by protocol handlers in the receive thread"})
var networkBroadcasts = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcasts_total", Description: "number of broadcast operations"})
var networkBroadcastQueueMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_queue_micros_total", Description: "microseconds broadcast requests sit on queue"})
var networkBroadcastSendMicros = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_broadcast_send_micros_total", Description: "microseconds spent broadcasting"})
var networkBroadcastsDropped = metrics.MakeCounter(metrics.MetricName{Name: "algod_broadcasts_dropped_total", Description: "number of broadcast messages not sent to some peer"})
var networkSlowPeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_slow_drops_total", Description: "number of peers dropped for being slow to send to"})
var networkIdlePeerDrops = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_idle_drops_total", Description: "number of peers dropped due to idle connection"})
var minPing = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peer_min_ping_seconds", Description: "Network round trip time to fastest peer in seconds."})
var meanPing = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peer_mean_ping_seconds", Description: "Network round trip time to average peer in seconds."})
var medianPing = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peer_median_ping_seconds", Description: "Network round trip time to median peer in seconds."})
var maxPing = metrics.MakeGauge(metrics.MetricName{Name: "algod_network_peer_max_ping_seconds", Description: "Network round trip time to slowest peer in seconds."})
// Peer opaque interface for referring to a neighbor in the network
type Peer interface{}
// PeerOption allows users to specify a subset of peers to query
type PeerOption int
const (
// PeersConnectedOut specifies all peers with outgoing connections
PeersConnectedOut PeerOption = iota
// PeersConnectedIn specifies all peers with inbound connections
PeersConnectedIn PeerOption = iota
// PeersPhonebook specifies all peers in the phonebook
PeersPhonebook PeerOption = iota
)
// GossipNode represents a node in the gossip network
type GossipNode interface {
Address() (string, bool)
Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error
Relay(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error
Disconnect(badnode Peer)
DisconnectPeers()
Ready() chan struct{}
// RegisterHTTPHandler path accepts gorilla/mux path annotations
RegisterHTTPHandler(path string, handler http.Handler)
// RequestConnectOutgoing asks the system to actually connect to peers.
// `replace` optionally drops existing connections before making new ones.
// `quit` chan allows cancellation. TODO: use `context`
RequestConnectOutgoing(replace bool, quit <-chan struct{})
// Get a list of Peers we could potentially send a direct message to.
GetPeers(options ...PeerOption) []Peer
// Start threads, listen on sockets.
Start()
// Close sockets. Stop threads.
Stop()
// RegisterHandlers adds to the set of given message handlers.
RegisterHandlers(dispatch []TaggedMessageHandler)
// ClearHandlers deregisters all the existing message handlers.
ClearHandlers()
}
// IncomingMessage represents a message arriving from some peer in our p2p network
type IncomingMessage struct {
Sender Peer
Tag Tag
Data []byte
Err error
Net GossipNode
// Received is time.Time.UnixNano()
Received int64
// processing is a channel that is used by messageHandlerThread
// to indicate that it has started processing this message. It
// is used to ensure fairness across peers in terms of processing
// messages.
processing chan struct{}
}
// Tag is a short string (2 bytes) marking a type of message
type Tag = protocol.Tag
func highPriorityTag(tag protocol.Tag) bool {
return tag == protocol.AgreementVoteTag || tag == protocol.ProposalPayloadTag
}
// OutgoingMessage represents a message we want to send.
type OutgoingMessage struct {
Action ForwardingPolicy
Tag Tag
Payload []byte
}
// ForwardingPolicy is an enum indicating to whom we should send a message
type ForwardingPolicy int
const (
// Ignore - discard (don't forward)
Ignore ForwardingPolicy = iota
// Disconnect - disconnect from the peer that sent this message
Disconnect
// Broadcast - forward to everyone (except the sender)
Broadcast
)
// MessageHandler takes a IncomingMessage (e.g., vote, transaction), processes it, and returns what (if anything)
// to send to the network in response.
// The ForwardingPolicy field of the returned OutgoingMessage indicates whether to reply directly to the sender
// (unicast), propagate to everyone except the sender (broadcast), or do nothing (ignore).
type MessageHandler interface {
Handle(message IncomingMessage) OutgoingMessage
}
// HandlerFunc represents an implementation of the MessageHandler interface
type HandlerFunc func(message IncomingMessage) OutgoingMessage
// Handle implements MessageHandler.Handle, calling the handler with the IncomingMessage and returning the OutgoingMessage
func (f HandlerFunc) Handle(message IncomingMessage) OutgoingMessage {
return f(message)
}
// TaggedMessageHandler receives one type of broadcast messages
type TaggedMessageHandler struct {
Tag
MessageHandler
}
// Propagate is a convenience function to save typing in the common case of a message handler telling us to propagate an incoming message
// "return network.Propagate(msg)" instead of "return network.OutgoingMsg{network.Broadcast, msg.Tag, msg.Data}"
func Propagate(msg IncomingMessage) OutgoingMessage {
return OutgoingMessage{Broadcast, msg.Tag, msg.Data}
}
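// Illustrative sketch of the contract documented above: a handler returns an OutgoingMessage
// whose Action tells the network to Ignore, Disconnect, or Broadcast. The looksValid helper is
// hypothetical; the tag and types are the ones defined in this file.
//
//	handler := HandlerFunc(func(msg IncomingMessage) OutgoingMessage {
//		if !looksValid(msg.Data) { // looksValid is a hypothetical validity check
//			return OutgoingMessage{Action: Disconnect}
//		}
//		return Propagate(msg) // forward to everyone except the sender
//	})
//	wn.RegisterHandlers([]TaggedMessageHandler{
//		{Tag: protocol.ProposalPayloadTag, MessageHandler: handler},
//	})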
// GossipNetworkPath is the URL path to connect to the websocket gossip node at.
// Contains {genesisID} param to be handled by gorilla/mux
const GossipNetworkPath = "/v1/{genesisID}/gossip"
// WebsocketNetwork implements GossipNode
type WebsocketNetwork struct {
listener net.Listener
server http.Server
router *mux.Router
scheme string // are we serving http or https ?
upgrader websocket.Upgrader
config config.Local
log logging.Logger
readBuffer chan IncomingMessage
wg sync.WaitGroup
handlers Multiplexer
ctx context.Context
ctxCancel context.CancelFunc
peersLock deadlock.RWMutex
peers []*wsPeer
broadcastQueueHighPrio chan broadcastRequest
broadcastQueueBulk chan broadcastRequest
phonebook Phonebook
dnsPhonebook ThreadsafePhonebook
GenesisID string
NetworkID protocol.NetworkID
RandomID string
ready int32
readyChan chan struct{}
meshUpdateRequests chan meshRequest
// Keep a record of pending outgoing connections so
	// we don't start duplicate connection attempts.
// Needs to be locked because it's accessed from the
// meshThread and also threads started to run tryConnect()
tryConnectAddrs map[string]int64
tryConnectLock deadlock.Mutex
incomingMsgFilter *messageFilter // message filter to remove duplicate incoming messages from different peers
eventualReadyDelay time.Duration
relayMessages bool // True if we should relay messages from other nodes (nominally true for relays, false otherwise)
prioScheme NetPrioScheme
prioTracker *prioTracker
prioResponseChan chan *wsPeer
	// once we detect that we have a misconfigured UseForwardedForAddress, we set this and write a warning message.
misconfiguredUseForwardedForAddress bool
}
type broadcastRequest struct {
tag Tag
data []byte
except *wsPeer
done chan struct{}
start time.Time
}
// Address returns a string and whether that is a 'final' address or guessed.
// Part of GossipNode interface
func (wn *WebsocketNetwork) Address() (string, bool) {
parsedURL := url.URL{Scheme: wn.scheme}
var connected bool
if wn.listener == nil {
parsedURL.Host = wn.config.NetAddress
connected = false
} else {
parsedURL.Host = wn.listener.Addr().String()
connected = true
}
return parsedURL.String(), connected
}
// PublicAddress what we tell other nodes to connect to.
// Might be different from our locally perceived network address due to NAT/etc.
// Returns config "PublicAddress" if available, otherwise local addr.
func (wn *WebsocketNetwork) PublicAddress() string {
if len(wn.config.PublicAddress) > 0 {
return wn.config.PublicAddress
}
localAddr, _ := wn.Address()
return localAddr
}
// Broadcast sends a message.
// If except is not nil then we will not send it to that neighboring Peer.
// if wait is true then the call blocks until the packet has actually been sent to all neighbors.
// TODO: add `priority` argument so that we don't have to guess it based on tag
func (wn *WebsocketNetwork) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error {
request := broadcastRequest{tag: tag, data: data, start: time.Now()}
if except != nil {
request.except = except.(*wsPeer)
}
broadcastQueue := wn.broadcastQueueBulk
if highPriorityTag(tag) {
broadcastQueue = wn.broadcastQueueHighPrio
}
if wait {
request.done = make(chan struct{})
select {
case broadcastQueue <- request:
// ok, enqueued
//wn.log.Debugf("broadcast enqueued")
case <-wn.ctx.Done():
return errNetworkClosing
case <-ctx.Done():
return errBcastCallerCancel
}
select {
case <-request.done:
//wn.log.Debugf("broadcast done")
return nil
case <-wn.ctx.Done():
return errNetworkClosing
case <-ctx.Done():
return errBcastCallerCancel
}
}
// no wait
select {
case broadcastQueue <- request:
//wn.log.Debugf("broadcast enqueued nowait")
return nil
default:
wn.log.Debugf("broadcast queue full")
// broadcastQueue full, and we're not going to wait for it.
return errBcastQFull
}
}
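// Illustrative sketch: enqueueing a high-priority broadcast and waiting (wait=true) for it to
// be handed to the peer send queues, bounded by a caller-side timeout. payloadBytes is
// hypothetical.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
//	defer cancel()
//	if err := wn.Broadcast(ctx, protocol.ProposalPayloadTag, payloadBytes, true, nil); err != nil {
//		wn.log.Warnf("broadcast failed: %v", err)
//	}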
// Relay message
func (wn *WebsocketNetwork) Relay(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error {
if wn.relayMessages {
return wn.Broadcast(ctx, tag, data, wait, except)
}
return nil
}
func (wn *WebsocketNetwork) disconnectThread(badnode Peer, reason disconnectReason) {
defer wn.wg.Done()
wn.disconnect(badnode, reason)
}
// Disconnect from a peer, probably due to protocol errors.
func (wn *WebsocketNetwork) Disconnect(node Peer) {
wn.disconnect(node, disconnectBadData)
}
// Disconnect from a peer, probably due to protocol errors.
func (wn *WebsocketNetwork) disconnect(badnode Peer, reason disconnectReason) {
if badnode == nil {
return
}
peer := badnode.(*wsPeer)
peer.CloseAndWait()
wn.removePeer(peer, reason)
}
func closeWaiter(wg *sync.WaitGroup, peer *wsPeer) {
defer wg.Done()
peer.CloseAndWait()
}
// DisconnectPeers shuts down all connections
func (wn *WebsocketNetwork) DisconnectPeers() {
wn.peersLock.Lock()
defer wn.peersLock.Unlock()
closeGroup := sync.WaitGroup{}
closeGroup.Add(len(wn.peers))
for _, peer := range wn.peers {
go closeWaiter(&closeGroup, peer)
}
wn.peers = wn.peers[:0]
closeGroup.Wait()
}
// Ready returns a chan that will be closed when we have a minimum number of peer connections active
func (wn *WebsocketNetwork) Ready() chan struct{} {
return wn.readyChan
}
// RegisterHTTPHandler path accepts gorilla/mux path annotations
func (wn *WebsocketNetwork) RegisterHTTPHandler(path string, handler http.Handler) {
wn.router.Handle(path, handler)
}
// RequestConnectOutgoing tries to actually do the connect to new peers.
// `replace` drop all connections first and find new peers.
func (wn *WebsocketNetwork) RequestConnectOutgoing(replace bool, quit <-chan struct{}) {
request := meshRequest{disconnect: false}
if quit != nil {
request.done = make(chan struct{})
}
select {
case wn.meshUpdateRequests <- request:
case <-quit:
return
}
if request.done != nil {
select {
case <-request.done:
case <-quit:
}
}
}
// GetPeers returns a snapshot of our Peer list, according to the specified options.
// Peers may be duplicated and refer to the same underlying node.
func (wn *WebsocketNetwork) GetPeers(options ...PeerOption) []Peer {
outPeers := make([]Peer, 0)
for _, option := range options {
switch option {
case PeersConnectedOut:
wn.peersLock.RLock()
for _, peer := range wn.peers {
if peer.outgoing {
outPeers = append(outPeers, Peer(peer))
}
}
wn.peersLock.RUnlock()
case PeersPhonebook:
// return copy of phonebook, which probably also contains peers we're connected to, but if it doesn't maybe we shouldn't be making new connections to those peers (because they disappeared from the directory)
var addrs []string
addrs = wn.phonebook.GetAddresses(1000)
for _, addr := range addrs {
outPeers = append(outPeers, &wsPeerCore{net: wn, rootURL: addr})
}
case PeersConnectedIn:
wn.peersLock.RLock()
for _, peer := range wn.peers {
if !peer.outgoing {
outPeers = append(outPeers, Peer(peer))
}
}
wn.peersLock.RUnlock()
}
}
return outPeers
}
func (wn *WebsocketNetwork) setup() {
wn.upgrader.ReadBufferSize = 4096
wn.upgrader.WriteBufferSize = 4096
wn.upgrader.EnableCompression = false
wn.router = mux.NewRouter()
wn.router.Handle(GossipNetworkPath, wn)
wn.server.Handler = wn.router
wn.server.ReadHeaderTimeout = httpServerReadHeaderTimeout
wn.server.WriteTimeout = httpServerWriteTimeout
wn.server.IdleTimeout = httpServerIdleTimeout
wn.server.MaxHeaderBytes = httpServerMaxHeaderBytes
wn.ctx, wn.ctxCancel = context.WithCancel(context.Background())
wn.broadcastQueueHighPrio = make(chan broadcastRequest, 1000)
wn.broadcastQueueBulk = make(chan broadcastRequest, 100)
wn.meshUpdateRequests = make(chan meshRequest, 5)
wn.readyChan = make(chan struct{})
wn.tryConnectAddrs = make(map[string]int64)
wn.eventualReadyDelay = time.Minute
wn.prioTracker = newPrioTracker(wn)
readBufferLen := wn.config.IncomingConnectionsLimit + wn.config.GossipFanout
if readBufferLen < 100 {
readBufferLen = 100
}
if readBufferLen > 10000 {
readBufferLen = 10000
}
wn.readBuffer = make(chan IncomingMessage, readBufferLen)
var rbytes [10]byte
rand.Read(rbytes[:])
wn.RandomID = base64.StdEncoding.EncodeToString(rbytes[:])
if wn.config.EnableIncomingMessageFilter {
wn.incomingMsgFilter = makeMessageFilter(wn.config.IncomingMessageFilterBucketCount, wn.config.IncomingMessageFilterBucketSize)
}
}
func (wn *WebsocketNetwork) rlimitIncomingConnections() error {
var lim unix.Rlimit
err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim)
if err != nil {
return err
}
// If rlim_max is not sufficient, reduce IncomingConnectionsLimit
var rlimitMaxCap uint64
if lim.Max < wn.config.ReservedFDs {
rlimitMaxCap = 0
} else {
rlimitMaxCap = lim.Max - wn.config.ReservedFDs
}
if rlimitMaxCap > uint64(MaxInt) {
rlimitMaxCap = uint64(MaxInt)
}
if wn.config.IncomingConnectionsLimit > int(rlimitMaxCap) {
wn.log.Warnf("Reducing IncomingConnectionsLimit from %d to %d since RLIMIT_NOFILE is %d",
wn.config.IncomingConnectionsLimit, rlimitMaxCap, lim.Max)
wn.config.IncomingConnectionsLimit = int(rlimitMaxCap)
}
// Set rlim_cur to match IncomingConnectionsLimit
newLimit := uint64(wn.config.IncomingConnectionsLimit) + wn.config.ReservedFDs
if newLimit > lim.Cur {
lim.Cur = newLimit
err = unix.Setrlimit(unix.RLIMIT_NOFILE, &lim)
if err != nil {
return err
}
}
return nil
}
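// Worked example (illustrative): assuming an RLIMIT_NOFILE hard limit of 1024, ReservedFDs of
// 256 and a configured IncomingConnectionsLimit of 10000:
//
//	rlimitMaxCap = 1024 - 256 = 768   // hard limit minus reserved descriptors
//	IncomingConnectionsLimit -> 768   // reduced to fit under the cap
//	newLimit = 768 + 256 = 1024       // soft limit raised (if needed) to cover both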
// Start makes network connections and threads
func (wn *WebsocketNetwork) Start() {
var err error
if wn.config.IncomingConnectionsLimit < 0 {
wn.config.IncomingConnectionsLimit = MaxInt
}
// Make sure we do not accept more incoming connections than our
// open file rlimit, with some headroom for other FDs (DNS, log
// files, SQLite files, telemetry, ...)
err = wn.rlimitIncomingConnections()
if err != nil {
wn.log.Error("ws network start: rlimitIncomingConnections ", err)
return
}
if wn.config.NetAddress != "" {
listener, err := net.Listen("tcp", wn.config.NetAddress)
if err != nil {
wn.log.Errorf("network could not listen %v: %s", wn.config.NetAddress, err)
return
}
wn.listener = netutil.LimitListener(listener, wn.config.IncomingConnectionsLimit)
wn.log.Debugf("listening on %s", wn.listener.Addr().String())
}
if wn.config.TLSCertFile != "" && wn.config.TLSKeyFile != "" {
wn.scheme = "https"
} else {
wn.scheme = "http"
}
wn.meshUpdateRequests <- meshRequest{false, nil}
wn.RegisterHandlers(pingHandlers)
wn.RegisterHandlers(prioHandlers)
if wn.listener != nil {
wn.wg.Add(1)
go wn.httpdThread()
}
wn.wg.Add(1)
go wn.meshThread()
if wn.config.PeerPingPeriodSeconds > 0 {
wn.wg.Add(1)
go wn.pingThread()
}
for i := 0; i < incomingThreads; i++ {
wn.wg.Add(1)
go wn.messageHandlerThread()
}
for i := 0; i < broadcastThreads; i++ {
wn.wg.Add(1)
go wn.broadcastThread()
}
wn.wg.Add(1)
go wn.prioWeightRefresh()
wn.log.Infof("serving genesisID=%#v on %#v", wn.GenesisID, wn.PublicAddress())
}
func (wn *WebsocketNetwork) httpdThread() {
defer wn.wg.Done()
var err error
if wn.config.TLSCertFile != "" && wn.config.TLSKeyFile != "" {
err = wn.server.ServeTLS(wn.listener, wn.config.TLSCertFile, wn.config.TLSKeyFile)
} else {
err = wn.server.Serve(wn.listener)
}
if err == http.ErrServerClosed {
} else if err != nil {
wn.log.Info("ws net http server exited ", err)
}
}
// innerStop closes all peer connections as part of shutting down the network
func (wn *WebsocketNetwork) innerStop() {
wn.peersLock.Lock()
defer wn.peersLock.Unlock()
wn.wg.Add(len(wn.peers))
for _, peer := range wn.peers {
go closeWaiter(&wn.wg, peer)
}
wn.peers = wn.peers[:0]
}
// Stop closes network connections and stops threads.
// Stop blocks until all activity on this node is done.
func (wn *WebsocketNetwork) Stop() {
wn.innerStop()
var listenAddr string
if wn.listener != nil {
listenAddr = wn.listener.Addr().String()
}
wn.ctxCancel()
ctx, timeoutCancel := context.WithTimeout(context.Background(), 5*time.Second)
defer timeoutCancel()
err := wn.server.Shutdown(ctx)
if err != nil {
wn.log.Warnf("problem shutting down %s: %v", listenAddr, err)
}
wn.wg.Wait()
wn.log.Debugf("closed %s", listenAddr)
}
// RegisterHandlers registers the set of given message handlers.
func (wn *WebsocketNetwork) RegisterHandlers(dispatch []TaggedMessageHandler) {
wn.handlers.RegisterHandlers(dispatch)
}
// ClearHandlers deregisters all the existing message handlers.
func (wn *WebsocketNetwork) ClearHandlers() {
wn.handlers.ClearHandlers()
}
func (wn *WebsocketNetwork) setHeaders(header http.Header) {
header.Set(GenesisHeader, wn.GenesisID)
myTelemetryGUID := wn.log.GetTelemetryHostName()
header.Set(TelemetryIDHeader, myTelemetryGUID)
header.Set(ProtocolVersionHeader, ProtocolVersion)
header.Set(AddressHeader, wn.PublicAddress())
header.Set(NodeRandomHeader, wn.RandomID)
}
// retrieve the origin IP address from the HTTP header, if one exists and it's a valid IP address.
func (wn *WebsocketNetwork) getForwardedConnectionAddress(header http.Header) (ip net.IP) {
if wn.config.UseXForwardedForAddressField == "" {
return
}
forwardedForString := header.Get(wn.config.UseXForwardedForAddressField)
if forwardedForString == "" {
if !wn.misconfiguredUseForwardedForAddress {
wn.log.Warnf("UseForwardedForAddressField is configured as '%s', but no value was retrieved from header", wn.config.UseXForwardedForAddressField)
wn.misconfiguredUseForwardedForAddress = true
}
return
}
ip = net.ParseIP(forwardedForString)
if ip == nil {
		// if the origin isn't a valid IP address, log this.
wn.log.Warnf("unable to parse origin address: '%s'", forwardedForString)
}
return
}
func (wn *WebsocketNetwork) checkHeaders(header http.Header, addr string, forwardedAddr net.IP) (ok bool, otherTelemetryGUID string, otherPublicAddr string, otherInstanceName string) {
ok = false
otherTelemetryGUID = ""
otherPublicAddr = ""
otherVersion := header.Get(ProtocolVersionHeader)
if otherVersion != ProtocolVersion {
wn.log.Warnf("new peer %#v version mismatch, mine=%#v theirs=%#v, headers %#v", addr, ProtocolVersion, otherVersion, header)
return
}
otherGenesisID := header.Get(GenesisHeader)
if len(otherGenesisID) > 0 && wn.GenesisID != otherGenesisID {
wn.log.Warnf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", addr, wn.GenesisID, otherGenesisID, header)
return
}
otherRandom := header.Get(NodeRandomHeader)
if otherRandom == wn.RandomID {
// This is pretty harmless and some configurations of phonebooks or DNS records make this likely. Quietly filter it out.
wn.log.Debugf("new peer %#v has same node random id, am I talking to myself? %#v", addr, wn.RandomID)
return
}
otherTelemetryGUID = header.Get(TelemetryIDHeader)
otherPublicAddr = header.Get(AddressHeader)
// if UseXForwardedForAddressField is not empty, attempt to override the otherPublicAddr with the X Forwarded For origin
if forwardedAddr != nil {
newURL, err := wn.updateURLHost(otherPublicAddr, forwardedAddr)
if err != nil {
wn.log.Errorf("failed to up updateURLHost with error %v", err)
} else {
otherPublicAddr = newURL
}
}
otherInstanceName = header.Get(InstanceNameHeader)
ok = true
return
}
// update the provided url with the given originIP
func (wn *WebsocketNetwork) updateURLHost(originalRootURL string, originIP net.IP) (newAddress string, err error) {
if originIP == nil {
return "", nil
}
sourceURL, err := url.Parse(originalRootURL)
if err != nil {
wn.log.Errorf("unable to parse url: '%s', error: %v", originalRootURL, err)
return
}
port := sourceURL.Port()
host := originIP.String()
if originIP.To4() == nil {
// it's an IPv6
host = "[" + host + "]"
}
if port != "" {
host = host + ":" + port
}
sourceURL.Host = host
newAddress = sourceURL.String()
return
}
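// Illustrative examples (hypothetical addresses):
//
//	wn.updateURLHost("http://1.2.3.4:4160", net.ParseIP("10.0.0.5"))    // -> "http://10.0.0.5:4160", nil
//	wn.updateURLHost("http://1.2.3.4:4160", net.ParseIP("2001:db8::1")) // -> "http://[2001:db8::1]:4160", nil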
// ServeHTTP handles the gossip network functions over websockets
func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *http.Request) {
if wn.numIncomingPeers() >= wn.config.IncomingConnectionsLimit {
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_limit"})
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
Address: justHost(request.RemoteAddr),
HostName: request.Header.Get(TelemetryIDHeader),
Incoming: true,
InstanceName: request.Header.Get(InstanceNameHeader),
Reason: "Connection Limit",
})
response.WriteHeader(http.StatusServiceUnavailable)
return
}
remoteHost, _, err := net.SplitHostPort(request.RemoteAddr)
if err != nil {
wn.log.Errorf("could not parse request.RemoteAddr=%v, %s", request.RemoteAddr, err)
response.WriteHeader(http.StatusServiceUnavailable)
return
}
originIP := wn.getForwardedConnectionAddress(request.Header)
if originIP != nil {
remoteHost = originIP.String()
}
if wn.connectedForIP(remoteHost) >= wn.config.MaxConnectionsPerIP {
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_limit"})
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
Address: justHost(request.RemoteAddr),
HostName: request.Header.Get(TelemetryIDHeader),
Incoming: true,
InstanceName: request.Header.Get(InstanceNameHeader),
Reason: "Remote IP Connection Limit",
})
response.WriteHeader(http.StatusServiceUnavailable)
return
}
// TODO: rate limit incoming connections. (must wait at least Duration between disconnect and connect? no more than N connect attempts per Duration?)
wn.log.Debugf("inbound from %s", request.RemoteAddr)
ok, otherTelemetryGUID, otherPublicAddr, otherInstanceName := wn.checkHeaders(request.Header, request.RemoteAddr, originIP)
if !ok {
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "bad header"})
return
}
requestHeader := make(http.Header)
wn.setHeaders(requestHeader)
var challenge string
if wn.prioScheme != nil {
challenge = wn.prioScheme.NewPrioChallenge()
requestHeader.Set(PriorityChallengeHeader, challenge)
}
conn, err := wn.upgrader.Upgrade(response, request, requestHeader)
if err != nil {
wn.log.Info("ws upgrade fail ", err)
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "ws upgrade fail"})
return
}
peer := &wsPeer{
wsPeerCore: wsPeerCore{
net: wn,
rootURL: otherPublicAddr,
originAddress: remoteHost,
},
conn: conn,
outgoing: false,
InstanceName: otherInstanceName,
incomingMsgFilter: wn.incomingMsgFilter,
prioChallenge: challenge,
}
peer.TelemetryGUID = otherTelemetryGUID
peer.init(wn.config)
wn.addPeer(peer)
localAddr, _ := wn.Address()
wn.log.With("event", "ConnectedIn").With("remote", otherPublicAddr).With("local", localAddr).Infof("Accepted incoming connection from peer %s", otherPublicAddr)
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent,
telemetryspec.PeerEventDetails{
Address: justHost(request.RemoteAddr),
HostName: otherTelemetryGUID,
Incoming: true,
InstanceName: otherInstanceName,
})
}
func (wn *WebsocketNetwork) messageHandlerThread() {
defer wn.wg.Done()
inactivityCheckTicker := time.NewTicker(connectionActivityMonitorInterval)
defer inactivityCheckTicker.Stop()
for {
select {
case <-wn.ctx.Done():
return
case msg := <-wn.readBuffer:
if msg.processing != nil {
// The channel send should never block, but just in case..
select {
case msg.processing <- struct{}{}:
default:
wn.log.Warnf("could not send on msg.processing")
}
}
if wn.config.EnableOutgoingNetworkMessageFiltering && len(msg.Data) >= messageFilterSize {
wn.sendFilterMessage(msg)
}
//wn.log.Debugf("msg handling %#v [%d]byte", msg.Tag, len(msg.Data))
start := time.Now()
// now, send to global handlers
outmsg := wn.handlers.Handle(msg)
handled := time.Now()
bufferNanos := start.UnixNano() - msg.Received
networkIncomingBufferMicros.AddUint64(uint64(bufferNanos/1000), nil)
handleTime := handled.Sub(start)
networkHandleMicros.AddUint64(uint64(handleTime.Nanoseconds()/1000), nil)
switch outmsg.Action {
case Disconnect:
wn.wg.Add(1)
go wn.disconnectThread(msg.Sender, disconnectBadData)
case Broadcast:
wn.Broadcast(wn.ctx, msg.Tag, msg.Data, false, msg.Sender)
default:
}
case <-inactivityCheckTicker.C:
// go over the peers and ensure we have some type of communication going on.
wn.checkPeersConnectivity()
}
}
}
// checkPeersConnectivity tests the last timestamp at which each of these
// peers was communicated with, and disconnects a peer if it has been too long since
// the last time.
func (wn *WebsocketNetwork) checkPeersConnectivity() {
wn.peersLock.Lock()
defer wn.peersLock.Unlock()
currentTime := time.Now()
for _, peer := range wn.peers {
lastPacketTime := peer.GetLastPacketTime()
timeSinceLastPacket := currentTime.Sub(time.Unix(0, lastPacketTime))
if timeSinceLastPacket > maxPeerInactivityDuration {
wn.wg.Add(1)
go wn.disconnectThread(peer, disconnectIdleConn)
networkIdlePeerDrops.Inc(nil)
}
}
}
func (wn *WebsocketNetwork) sendFilterMessage(msg IncomingMessage) {
digest := generateMessageDigest(msg.Tag, msg.Data)
//wn.log.Debugf("send filter %s(%d) %v", msg.Tag, len(msg.Data), digest)
wn.Broadcast(context.Background(), protocol.MsgSkipTag, digest[:], false, msg.Sender)
}
func (wn *WebsocketNetwork) broadcastThread() {
defer wn.wg.Done()
var peers []*wsPeer
for {
// broadcast from high prio channel as long as we can
select {
case request := <-wn.broadcastQueueHighPrio:
wn.innerBroadcast(request, true, &peers)
continue
default:
}
// if nothing high prio, broadcast anything
select {
case request := <-wn.broadcastQueueHighPrio:
wn.innerBroadcast(request, true, &peers)
case request := <-wn.broadcastQueueBulk:
wn.innerBroadcast(request, false, &peers)
case <-wn.ctx.Done():
return
}
}
}
func (wn *WebsocketNetwork) peerSnapshot(dest []*wsPeer) []*wsPeer {
wn.peersLock.RLock()
defer wn.peersLock.RUnlock()
if cap(dest) >= len(wn.peers) {
dest = dest[:len(wn.peers)]
} else {
dest = make([]*wsPeer, len(wn.peers))
}
copy(dest, wn.peers)
return dest
}
// prio is set if the broadcast is a high-priority broadcast.
func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool, ppeers *[]*wsPeer) {
broadcastQueueTime := time.Now().Sub(request.start)
networkBroadcastQueueMicros.AddUint64(uint64(broadcastQueueTime.Nanoseconds()/1000), nil)
start := time.Now()
tbytes := []byte(request.tag)
mbytes := make([]byte, len(tbytes)+len(request.data))
copy(mbytes, tbytes)
copy(mbytes[len(tbytes):], request.data)
var digest crypto.Digest
if request.tag != protocol.MsgSkipTag && len(request.data) >= messageFilterSize {
digest = crypto.Hash(mbytes)
}
*ppeers = wn.peerSnapshot(*ppeers)
peers := *ppeers
// first send to all the easy outbound peers who don't block, get them started.
for pi, peer := range peers {
if wn.config.BroadcastConnectionsLimit >= 0 && pi >= wn.config.BroadcastConnectionsLimit {
break
}
if peer == request.except {
peers[pi] = nil
continue
}
ok := peer.writeNonBlock(mbytes, prio, digest)
if ok {
peers[pi] = nil
continue
}
if prio {
// couldn't send a high prio message; give up
wn.log.Infof("dropping peer for being too slow to send to: %s, %d enqueued", peer.rootURL, len(peer.sendBufferHighPrio))
wn.removePeer(peer, disconnectTooSlow)
peer.Close()
networkSlowPeerDrops.Inc(nil)
} else {
networkBroadcastsDropped.Inc(nil)
}
}
dt := time.Now().Sub(start)
networkBroadcasts.Inc(nil)
networkBroadcastSendMicros.AddUint64(uint64(dt.Nanoseconds()/1000), nil)
if request.done != nil {
close(request.done)
}
}
// NumPeers returns number of peers we connect to (all peers incoming and outbound).
func (wn *WebsocketNetwork) NumPeers() int {
wn.peersLock.RLock()
defer wn.peersLock.RUnlock()
return len(wn.peers)
}
func (wn *WebsocketNetwork) numOutgoingPeers() int {
wn.peersLock.RLock()
defer wn.peersLock.RUnlock()
count := 0
for _, peer := range wn.peers {
if peer.outgoing {
count++
}
}
return count
}
func (wn *WebsocketNetwork) numIncomingPeers() int {
wn.peersLock.RLock()
defer wn.peersLock.RUnlock()
count := 0
for _, peer := range wn.peers {
if !peer.outgoing {
count++
}
}
return count
}
// isConnectedTo returns true if addr matches any connected peer, based on the peer's root url.
func (wn *WebsocketNetwork) isConnectedTo(addr string) bool {
wn.peersLock.RLock()
defer wn.peersLock.RUnlock()
for _, peer := range wn.peers {
if addr == peer.rootURL {
return true
}
}
return false
}
// connectedForIP returns number of peers with same host
func (wn *WebsocketNetwork) connectedForIP(host string) int {
wn.peersLock.RLock()
defer wn.peersLock.RUnlock()
out := 0
for _, peer := range wn.peers {
if host == peer.OriginAddress() {
out++
}
}
return out
}
const meshThreadInterval = time.Minute
type meshRequest struct {
disconnect bool
done chan struct{}
}
func imin(a, b int) int {
if a < b {
return a
}
return b
}
// meshThread maintains the network, e.g. that we have sufficient connectivity to peers
func (wn *WebsocketNetwork) meshThread() {
defer wn.wg.Done()
timer := time.NewTicker(meshThreadInterval)
defer timer.Stop()
for {
var request meshRequest
select {
case <-timer.C:
request.disconnect = false
request.done = nil
case request = <-wn.meshUpdateRequests:
case <-wn.ctx.Done():
return
}
if request.disconnect {
wn.DisconnectPeers()
}
// TODO: only do DNS fetch every N seconds? Honor DNS TTL? Trust DNS library we're using to handle caching and TTL?
dnsAddrs := wn.getDNSAddrs()
if len(dnsAddrs) > 0 {
wn.log.Debugf("got %d dns addrs, %#v", len(dnsAddrs), dnsAddrs[:imin(5, len(dnsAddrs))])
wn.dnsPhonebook.ReplacePeerList(dnsAddrs)
mp, ok := wn.phonebook.(*MultiPhonebook)
if ok {
mp.AddPhonebook(&wn.dnsPhonebook)
}
} else {
wn.log.Debugf("got no DNS addrs for network %#v", wn.NetworkID)
}
desired := wn.config.GossipFanout
numOutgoing := wn.numOutgoingPeers() + wn.numOutgoingPending()
need := desired - numOutgoing
if need > 0 {
// get more than we need so that we can ignore duplicates
newAddrs := wn.phonebook.GetAddresses(desired + numOutgoing)
for _, na := range newAddrs {
if na == wn.config.PublicAddress {
continue
}
gossipAddr, ok := wn.tryConnectReserveAddr(na)
if ok {
wn.wg.Add(1)
go wn.tryConnect(na, gossipAddr)
need--
if need == 0 {
break
}
}
}
}
if request.done != nil {
close(request.done)
}
}
}
// prioWeightRefreshTime controls how often we refresh the weights
// of connected peers.
const prioWeightRefreshTime = time.Minute
// prioWeightRefresh periodically refreshes the weights of connected peers.
func (wn *WebsocketNetwork) prioWeightRefresh() {
defer wn.wg.Done()
ticker := time.NewTicker(prioWeightRefreshTime)
defer ticker.Stop()
var peers []*wsPeer
for {
select {
case <-ticker.C:
case <-wn.ctx.Done():
return
}
peers = wn.peerSnapshot(peers)
for _, peer := range peers {
wn.peersLock.RLock()
addr := peer.prioAddress
weight := peer.prioWeight
wn.peersLock.RUnlock()
newWeight := wn.prioScheme.GetPrioWeight(addr)
if newWeight != weight {
wn.peersLock.Lock()
wn.prioTracker.setPriority(peer, addr, newWeight)
wn.peersLock.Unlock()
}
}
}
}
// Wake up the thread to do work this often.
const pingThreadPeriod = 30 * time.Second
// If ping stats are older than this, don't include in metrics.
const maxPingAge = 30 * time.Minute
// pingThread wakes up periodically to refresh the ping times on peers and update the metrics gauges.
func (wn *WebsocketNetwork) pingThread() {
defer wn.wg.Done()
ticker := time.NewTicker(pingThreadPeriod)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-wn.ctx.Done():
return
}
sendList := wn.peersToPing()
wn.log.Debugf("ping %d peers...", len(sendList))
for _, peer := range sendList {
if !peer.sendPing() {
				// if we failed to send a ping, see how long it has been since the last successful ping.
lastPingSent, _ := peer.pingTimes()
wn.log.Infof("failed to ping to %v for the past %f seconds", peer, time.Now().Sub(lastPingSent).Seconds())
}
}
}
}
// Walks list of peers, gathers list of peers to ping, also calculates statistics.
func (wn *WebsocketNetwork) peersToPing() []*wsPeer {
wn.peersLock.RLock()
defer wn.peersLock.RUnlock()
// Never flood outbound traffic by trying to ping all the peers at once.
// Send to at most one fifth of the peers.
maxSend := 1 + (len(wn.peers) / 5)
out := make([]*wsPeer, 0, maxSend)
now := time.Now()
// a list to sort to find median
times := make([]float64, 0, len(wn.peers))
var min = math.MaxFloat64
var max float64
var sum float64
pingPeriod := time.Duration(wn.config.PeerPingPeriodSeconds) * time.Second
for _, peer := range wn.peers {
lastPingSent, lastPingRoundTripTime := peer.pingTimes()
sendToNow := now.Sub(lastPingSent)
if (sendToNow > pingPeriod) && (len(out) < maxSend) {
out = append(out, peer)
}
if (lastPingRoundTripTime > 0) && (sendToNow < maxPingAge) {
ftime := lastPingRoundTripTime.Seconds()
sum += ftime
times = append(times, ftime)
if ftime < min {
min = ftime
}
if ftime > max {
max = ftime
}
}
}
if len(times) != 0 {
sort.Float64s(times)
median := times[len(times)/2]
medianPing.Set(median, nil)
mean := sum / float64(len(times))
meanPing.Set(mean, nil)
minPing.Set(min, nil)
maxPing.Set(max, nil)
wn.log.Infof("ping times min=%f mean=%f median=%f max=%f", min, mean, median, max)
}
return out
}
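// Worked example (illustrative): with 12 connected peers, maxSend = 1 + 12/5 = 3, so at most 3
// peers are pinged on this pass; the rest are picked up on later ticks of pingThread.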
func (wn *WebsocketNetwork) getDNSAddrs() []string {
dnsBootstrap := wn.config.DNSBootstrap(wn.NetworkID)
srvPhonebook, err := wn.readFromBootstrap(dnsBootstrap)
if err != nil {
// only log this warning on testnet or devnet
if wn.NetworkID == config.Devnet || wn.NetworkID == config.Testnet {
wn.log.Warnf("Cannot lookup SRV record for %s: %v", dnsBootstrap, err)
}
return nil
}
return srvPhonebook
}
func (wn *WebsocketNetwork) readFromBootstrap(bootstrapID string) (addrs []string, err error) {
if bootstrapID == "" {
wn.log.Debug("no dns lookup due to empty bootstrapID")
return
}
_, records, sysLookupErr := net.LookupSRV("algobootstrap", "tcp", bootstrapID)
if sysLookupErr != nil {
var resolver tools_network.Resolver
		// try to resolve the address. If it's in dotted-numbers format, it will be returned right away.
		// If it's a named address, we attempt to parse it and might fail.
		// ( failing isn't that bad; the resolver would internally try to use 8.8.8.8 instead )
if DNSIPAddr, err2 := net.ResolveIPAddr("ip", wn.config.FallbackDNSResolverAddress); err2 == nil {
resolver.DNSAddress = *DNSIPAddr
} else {
wn.log.Infof("readFromBootstrap: Failed to resolve fallback DNS resolver address '%s': %v; falling back to default fallback resolver address", wn.config.FallbackDNSResolverAddress, err2)
}
_, records, err = resolver.LookupSRV(context.Background(), "algobootstrap", "tcp", bootstrapID)
if err != nil {
wn.log.Warnf("readFromBootstrap: DNS LookupSRV failed when using system resolver(%v) as well as via %s due to %v", sysLookupErr, resolver.EffectiveResolverDNS(), err)
return
}
// we succeeded when using the public dns. log this.
wn.log.Infof("readFromBootstrap: DNS LookupSRV failed when using the system resolver(%v); using public DNS(%s) server directly instead.", sysLookupErr, resolver.EffectiveResolverDNS())
}
for _, srv := range records {
// empty target won't take us far; skip these
if srv.Target == "" {
continue
}
		// according to the SRV spec, each target needs to end with a dot. While this would make a valid host name, including the
		// last dot could lead to a non-canonical domain name representation, which is better avoided.
if srv.Target[len(srv.Target)-1:] == "." {
srv.Target = srv.Target[:len(srv.Target)-1]
}
addrs = append(addrs, fmt.Sprintf("%s:%d", srv.Target, srv.Port))
}
return
}
// ProtocolVersionHeader HTTP header for protocol version. TODO: this may be unneeded redundancy since we also have url versioning "/v1/..."
const ProtocolVersionHeader = "X-Algorand-Version"
// ProtocolVersion is the current version attached to the ProtocolVersionHeader header
const ProtocolVersion = "1"
// TelemetryIDHeader HTTP header for telemetry-id for logging
const TelemetryIDHeader = "X-Algorand-TelId"
// GenesisHeader HTTP header for genesis id to make sure we're on the same chain
const GenesisHeader = "X-Algorand-Genesis"
// NodeRandomHeader HTTP header that a node uses to make sure it's not talking to itself
const NodeRandomHeader = "X-Algorand-NodeRandom"
// AddressHeader HTTP header by which an inbound connection reports its public address
const AddressHeader = "X-Algorand-Location"
// InstanceNameHeader HTTP header by which an inbound connection reports an ID to distinguish multiple local nodes.
const InstanceNameHeader = "X-Algorand-InstanceName"
// PriorityChallengeHeader HTTP header informs a client about the challenge it should sign to increase network priority.
const PriorityChallengeHeader = "X-Algorand-PriorityChallenge"
var websocketsScheme = map[string]string{"http": "ws", "https": "wss"}
var errBadAddr = errors.New("bad address")
var errNetworkClosing = errors.New("WebsocketNetwork shutting down")
var errBcastCallerCancel = errors.New("caller cancelled broadcast")
var errBcastQFull = errors.New("broadcast queue full")
// HostColonPortPattern matches "^[^:]+:\\d+$" e.g. "foo.com.:1234"
var HostColonPortPattern = regexp.MustCompile("^[^:]+:\\d+$")
// ParseHostOrURL handles "host:port" or a full URL.
// Standard library net/url.Parse chokes on "host:port".
func ParseHostOrURL(addr string) (*url.URL, error) {
var parsedURL *url.URL
if HostColonPortPattern.MatchString(addr) {
parsedURL = &url.URL{Scheme: "http", Host: addr}
return parsedURL, nil
}
return url.Parse(addr)
}
// addrToGossipAddr parses host:port or a URL and returns the URL to the websocket interface at that address.
func (wn *WebsocketNetwork) addrToGossipAddr(addr string) (string, error) {
parsedURL, err := ParseHostOrURL(addr)
if err != nil {
wn.log.Warnf("could not parse addr %#v: %s", addr, err)
return "", errBadAddr
}
parsedURL.Scheme = websocketsScheme[parsedURL.Scheme]
if parsedURL.Scheme == "" {
parsedURL.Scheme = "ws"
}
parsedURL.Path = strings.Replace(path.Join(parsedURL.Path, GossipNetworkPath), "{genesisID}", wn.GenesisID, -1)
return parsedURL.String(), nil
}
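// Illustrative examples, assuming a hypothetical relay address and wn.GenesisID == "testnet-v1.0":
//
//	wn.addrToGossipAddr("r1.example.com:4160")       // -> "ws://r1.example.com:4160/v1/testnet-v1.0/gossip", nil
//	wn.addrToGossipAddr("https://relay.example.com") // -> "wss://relay.example.com/v1/testnet-v1.0/gossip", nil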
// tryConnectReserveAddr synchronously checks that addr is not already being connected to, returns (websocket URL or "", true if connection may proceed)
func (wn *WebsocketNetwork) tryConnectReserveAddr(addr string) (gossipAddr string, ok bool) {
wn.tryConnectLock.Lock()
defer wn.tryConnectLock.Unlock()
_, exists := wn.tryConnectAddrs[addr]
if exists {
return "", false
}
gossipAddr, err := wn.addrToGossipAddr(addr)
if err != nil {
return "", false
}
_, exists = wn.tryConnectAddrs[gossipAddr]
if exists {
return "", false
}
// WARNING: isConnectedTo takes wn.peersLock; to avoid deadlock, never try to take wn.peersLock outside an attempt to lock wn.tryConnectLock
if wn.isConnectedTo(addr) {
return "", false
}
now := time.Now().Unix()
wn.tryConnectAddrs[addr] = now
wn.tryConnectAddrs[gossipAddr] = now
return gossipAddr, true
}
// tryConnectReleaseAddr should be called when a connection succeeds and becomes a peer, or fails and is no longer being attempted
func (wn *WebsocketNetwork) tryConnectReleaseAddr(addr, gossipAddr string) {
wn.tryConnectLock.Lock()
defer wn.tryConnectLock.Unlock()
delete(wn.tryConnectAddrs, addr)
delete(wn.tryConnectAddrs, gossipAddr)
}
func (wn *WebsocketNetwork) numOutgoingPending() int {
wn.tryConnectLock.Lock()
defer wn.tryConnectLock.Unlock()
return len(wn.tryConnectAddrs)
}
var websocketDialer = websocket.Dialer{
Proxy: http.ProxyFromEnvironment,
HandshakeTimeout: 45 * time.Second,
EnableCompression: false,
}
// tryConnect opens websocket connection and checks initial connection parameters.
// addr should be 'host:port' or a URL, gossipAddr is the websocket endpoint URL
func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
defer wn.tryConnectReleaseAddr(addr, gossipAddr)
defer func() {
if xpanic := recover(); xpanic != nil {
wn.log.Errorf("panic in tryConnect: %v", xpanic)
}
}()
defer wn.wg.Done()
requestHeader := make(http.Header)
wn.setHeaders(requestHeader)
myInstanceName := wn.log.GetInstanceName()
requestHeader.Set(InstanceNameHeader, myInstanceName)
conn, response, err := websocketDialer.DialContext(wn.ctx, gossipAddr, requestHeader)
if err != nil {
wn.log.Warnf("ws connect(%s) fail: %s", gossipAddr, err)
return
}
// no need to test the response.StatusCode since we know it's going to be http.StatusSwitchingProtocols, as it's already being tested inside websocketDialer.DialContext.
ok, otherTelemetryGUID, _, _ := wn.checkHeaders(response.Header, gossipAddr, nil)
if !ok {
return
}
peer := &wsPeer{wsPeerCore: wsPeerCore{net: wn, rootURL: addr}, conn: conn, outgoing: true, incomingMsgFilter: wn.incomingMsgFilter}
peer.TelemetryGUID = otherTelemetryGUID
peer.init(wn.config)
wn.addPeer(peer)
localAddr, _ := wn.Address()
wn.log.With("event", "ConnectedOut").With("remote", addr).With("local", localAddr).Infof("Made outgoing connection to peer %v", addr)
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent,
telemetryspec.PeerEventDetails{
Address: justHost(conn.RemoteAddr().String()),
HostName: otherTelemetryGUID,
Incoming: false,
InstanceName: myInstanceName,
})
if wn.prioScheme != nil {
challenge := response.Header.Get(PriorityChallengeHeader)
if challenge != "" {
resp := wn.prioScheme.MakePrioResponse(challenge)
if resp != nil {
mbytes := append([]byte(protocol.NetPrioResponseTag), resp...)
sent := peer.writeNonBlock(mbytes, true, crypto.Digest{})
if !sent {
wn.log.With("remote", addr).With("local", localAddr).Warnf("could not send priority response to %v", addr)
}
}
}
}
}
// NewWebsocketNetwork constructor for websockets based gossip network
func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebook Phonebook, genesisID string, networkID protocol.NetworkID) (wn *WebsocketNetwork, err error) {
outerPhonebook := &MultiPhonebook{phonebooks: []Phonebook{phonebook}}
wn = &WebsocketNetwork{log: log, config: config, phonebook: outerPhonebook, GenesisID: genesisID, NetworkID: networkID}
// TODO - add config parameter to allow non-relays to enable relaying.
wn.relayMessages = config.NetAddress != ""
wn.setup()
return wn, nil
}
// NewWebsocketGossipNode constructs a websocket network node and returns it as a GossipNode interface implementation
func NewWebsocketGossipNode(log logging.Logger, config config.Local, phonebook Phonebook, genesisID string, networkID protocol.NetworkID) (gn GossipNode, err error) {
return NewWebsocketNetwork(log, config, phonebook, genesisID, networkID)
}
// SetPrioScheme specifies the network priority scheme for a network node
func (wn *WebsocketNetwork) SetPrioScheme(s NetPrioScheme) {
wn.prioScheme = s
}
// called from wsPeer to report that it has closed
func (wn *WebsocketNetwork) peerRemoteClose(peer *wsPeer, reason disconnectReason) {
wn.removePeer(peer, reason)
}
func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
// first logging, then take the lock and do the actual accounting.
// definitely don't change this to do the logging while holding the lock.
localAddr, _ := wn.Address()
wn.log.With("event", "Disconnected").With("remote", peer.rootURL).With("local", localAddr).Infof("Peer %v disconnected", peer.rootURL)
peerAddr := ""
// we might be able to get addr out of conn, or it might be closed
if peer.conn != nil {
paddr := peer.conn.RemoteAddr()
if paddr != nil {
peerAddr = justHost(paddr.String())
}
}
if peerAddr == "" {
// didn't get addr from peer, try from url
url, err := url.Parse(peer.rootURL)
if err == nil {
peerAddr = justHost(url.Host)
} else {
// use whatever it is
peerAddr = justHost(peer.rootURL)
}
}
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.DisconnectPeerEvent,
telemetryspec.DisconnectPeerEventDetails{
PeerEventDetails: telemetryspec.PeerEventDetails{
Address: peerAddr,
HostName: peer.TelemetryGUID,
Incoming: !peer.outgoing,
InstanceName: peer.InstanceName,
},
Reason: string(reason),
})
wn.peersLock.Lock()
defer wn.peersLock.Unlock()
if peer.peerIndex < len(wn.peers) && wn.peers[peer.peerIndex] == peer {
heap.Remove(peersHeap{wn}, peer.peerIndex)
wn.prioTracker.removePeer(peer)
}
wn.countPeersSetGauges()
}
func (wn *WebsocketNetwork) addPeer(peer *wsPeer) {
wn.peersLock.Lock()
defer wn.peersLock.Unlock()
for _, p := range wn.peers {
if p == peer {
wn.log.Error("dup peer added %#v", peer)
return
}
}
heap.Push(peersHeap{wn}, peer)
wn.prioTracker.setPriority(peer, peer.prioAddress, peer.prioWeight)
wn.countPeersSetGauges()
if len(wn.peers) >= wn.config.GossipFanout {
// we have a quorum of connected peers, if we weren't ready before, we are now
if atomic.CompareAndSwapInt32(&wn.ready, 0, 1) {
wn.log.Debug("ready")
close(wn.readyChan)
}
} else if atomic.LoadInt32(&wn.ready) == 0 {
// but if we're not ready in a minute, call whatever peers we've got as good enough
wn.wg.Add(1)
go wn.eventualReady()
}
}
func (wn *WebsocketNetwork) eventualReady() {
defer wn.wg.Done()
minute := time.NewTimer(wn.eventualReadyDelay)
select {
case <-wn.ctx.Done():
case <-minute.C:
if atomic.CompareAndSwapInt32(&wn.ready, 0, 1) {
wn.log.Debug("ready")
close(wn.readyChan)
}
}
}
// should be run from inside a context holding wn.peersLock
func (wn *WebsocketNetwork) countPeersSetGauges() {
numIn := 0
numOut := 0
for _, xp := range wn.peers {
if xp.outgoing {
numOut++
} else {
numIn++
}
}
networkIncomingConnections.Set(float64(numIn), nil)
networkOutgoingConnections.Set(float64(numOut), nil)
}
func justHost(hostPort string) string {
host, _, err := net.SplitHostPort(hostPort)
if err != nil {
return hostPort
}
return host
}
| 1 | 35,195 | I think more idiomatic (and consistent with elsewhere in our code base, and more natural to read as "25 seconds") is `25 * time.Second` | algorand-go-algorand | go |
@@ -127,6 +127,7 @@ func NewDecoder(opts ...func(*Decoder)) *Decoder {
d := &Decoder{
MarshalOptions: MarshalOptions{
SupportJSONTags: true,
+ SupportYAMLTags: true,
},
}
for _, o := range opts { | 1 | package dynamodbattribute
import (
"encoding/base64"
"fmt"
"reflect"
"strconv"
"time"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
// An Unmarshaler is an interface to provide custom unmarshaling of
// AttributeValues. Use this to provide custom logic determining
// how AttributeValues should be unmarshaled.
// type ExampleUnmarshaler struct {
// Value int
// }
//
// func (u *ExampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
// if av.N == nil {
// return nil
// }
//
// n, err := strconv.ParseInt(*av.N, 10, 0)
// if err != nil {
// return err
// }
//
// u.Value = int(n)
// return nil
// }
type Unmarshaler interface {
UnmarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
}
// Unmarshal will unmarshal DynamoDB AttributeValues to Go value types.
// Both generic interface{} and concrete types are valid unmarshal
// destination types.
//
// Unmarshal will allocate maps, slices, and pointers as needed to
// unmarshal the AttributeValue into the provided type value.
//
// When unmarshaling AttributeValues into structs Unmarshal matches
// the field names of the struct to the AttributeValue Map keys.
// Initially it will look for an exact field name match, but will
// fall back to a case-insensitive match if no exact match is found.
//
// With the exception of omitempty, omitemptyelem, binaryset, numberset
// and stringset all struct tags used by Marshal are also used by
// Unmarshal.
//
// When decoding AttributeValues to interfaces Unmarshal will use the
// following types.
//
// []byte, AV Binary (B)
// [][]byte, AV Binary Set (BS)
// bool, AV Boolean (BOOL)
// []interface{}, AV List (L)
// map[string]interface{}, AV Map (M)
// float64, AV Number (N)
// Number, AV Number (N) with UseNumber set
// []float64, AV Number Set (NS)
// []Number, AV Number Set (NS) with UseNumber set
// string, AV String (S)
// []string, AV String Set (SS)
//
// If the Decoder option UseNumber is set, numbers will be unmarshaled
// as Number values instead of float64. Use this to maintain the original
// string formatting of the number as it was represented in the AttributeValue.
// In addition, this provides additional opportunities to parse the number
// string based on individual use cases.
//
// When unmarshaling any error that occurs will halt the unmarshal
// and return the error.
//
// The output value provided must be a non-nil pointer
func Unmarshal(av *dynamodb.AttributeValue, out interface{}) error {
return NewDecoder().Decode(av, out)
}
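// Illustrative sketch of unmarshaling a map AttributeValue into a struct by field name. The
// Record type and its values are hypothetical; aws.String is the helper from the
// github.com/aws/aws-sdk-go/aws package.
//
//	type Record struct {
//		ID    string
//		Count int
//	}
//
//	av := &dynamodb.AttributeValue{M: map[string]*dynamodb.AttributeValue{
//		"ID":    {S: aws.String("abc-123")},
//		"Count": {N: aws.String("42")},
//	}}
//
//	var record Record
//	if err := Unmarshal(av, &record); err != nil {
//		// handle error
//	}
//	// record.ID == "abc-123", record.Count == 42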
// UnmarshalMap is an alias for Unmarshal which unmarshals from
// a map of AttributeValues.
//
// The output value provided must be a non-nil pointer
func UnmarshalMap(m map[string]*dynamodb.AttributeValue, out interface{}) error {
return NewDecoder().Decode(&dynamodb.AttributeValue{M: m}, out)
}
// UnmarshalList is an alias for Unmarshal func which unmarshals
// a slice of AttributeValues.
//
// The output value provided must be a non-nil pointer
func UnmarshalList(l []*dynamodb.AttributeValue, out interface{}) error {
return NewDecoder().Decode(&dynamodb.AttributeValue{L: l}, out)
}
// UnmarshalListOfMaps is an alias for Unmarshal func which unmarshals a
// slice of maps of attribute values.
//
// This is useful for when you need to unmarshal the Items from a DynamoDB
// Query API call.
//
// The output value provided must be a non-nil pointer
func UnmarshalListOfMaps(l []map[string]*dynamodb.AttributeValue, out interface{}) error {
items := make([]*dynamodb.AttributeValue, len(l))
for i, m := range l {
items[i] = &dynamodb.AttributeValue{M: m}
}
return UnmarshalList(items, out)
}
// A Decoder provides unmarshaling AttributeValues to Go value types.
type Decoder struct {
MarshalOptions
// Instructs the decoder to decode AttributeValue Numbers as
// Number type instead of float64 when the destination type
// is interface{}. Similar to encoding/json.Number
UseNumber bool
}
// NewDecoder creates a new Decoder with default configuration. Use
// the `opts` functional options to override the default configuration.
func NewDecoder(opts ...func(*Decoder)) *Decoder {
d := &Decoder{
MarshalOptions: MarshalOptions{
SupportJSONTags: true,
},
}
for _, o := range opts {
o(d)
}
return d
}
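// exampleUseNumberOption is an editor-added, hedged sketch (not part of the
// original source) showing the functional-option pattern accepted by
// NewDecoder. It enables UseNumber so interface{} destinations receive Number
// instead of float64; the attribute value literal is an assumption.
func exampleUseNumberOption() (interface{}, error) {
	d := NewDecoder(func(d *Decoder) {
		d.UseNumber = true
	})

	n := "3.30"
	var out interface{}
	// out holds Number("3.30"), preserving the original string form of the number.
	err := d.Decode(&dynamodb.AttributeValue{N: &n}, &out)
	return out, err
}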
// Decode will unmarshal an AttributeValue into a Go value type. An error
// will be returned if the decoder is unable to unmarshal the AttributeValue
// to the provided Go value type.
//
// The output value provided must be a non-nil pointer
func (d *Decoder) Decode(av *dynamodb.AttributeValue, out interface{}, opts ...func(*Decoder)) error {
v := reflect.ValueOf(out)
if v.Kind() != reflect.Ptr || v.IsNil() || !v.IsValid() {
return &InvalidUnmarshalError{Type: reflect.TypeOf(out)}
}
return d.decode(av, v, tag{})
}
var stringInterfaceMapType = reflect.TypeOf(map[string]interface{}(nil))
var byteSliceType = reflect.TypeOf([]byte(nil))
var byteSliceSlicetype = reflect.TypeOf([][]byte(nil))
var numberType = reflect.TypeOf(Number(""))
var timeType = reflect.TypeOf(time.Time{})
func (d *Decoder) decode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
var u Unmarshaler
if av == nil || av.NULL != nil {
u, v = indirect(v, true)
if u != nil {
return u.UnmarshalDynamoDBAttributeValue(av)
}
return d.decodeNull(v)
}
u, v = indirect(v, false)
if u != nil {
return u.UnmarshalDynamoDBAttributeValue(av)
}
switch {
case len(av.B) != 0:
return d.decodeBinary(av.B, v)
case av.BOOL != nil:
return d.decodeBool(av.BOOL, v)
case len(av.BS) != 0:
return d.decodeBinarySet(av.BS, v)
case len(av.L) != 0:
return d.decodeList(av.L, v)
case len(av.M) != 0:
return d.decodeMap(av.M, v)
case av.N != nil:
return d.decodeNumber(av.N, v, fieldTag)
case len(av.NS) != 0:
return d.decodeNumberSet(av.NS, v)
case av.S != nil:
return d.decodeString(av.S, v, fieldTag)
case len(av.SS) != 0:
return d.decodeStringSet(av.SS, v)
}
return nil
}
func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error {
if v.Kind() == reflect.Interface {
buf := make([]byte, len(b))
copy(buf, b)
v.Set(reflect.ValueOf(buf))
return nil
}
if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
}
if v.Type() == byteSliceType {
// Optimization for []byte types
if v.IsNil() || v.Cap() < len(b) {
v.Set(reflect.MakeSlice(byteSliceType, len(b), len(b)))
} else if v.Len() != len(b) {
v.SetLen(len(b))
}
copy(v.Interface().([]byte), b)
return nil
}
switch v.Type().Elem().Kind() {
case reflect.Uint8:
// Fallback to reflection copy for type aliased of []byte type
if v.Kind() != reflect.Array && (v.IsNil() || v.Cap() < len(b)) {
v.Set(reflect.MakeSlice(v.Type(), len(b), len(b)))
} else if v.Len() != len(b) {
v.SetLen(len(b))
}
for i := 0; i < len(b); i++ {
v.Index(i).SetUint(uint64(b[i]))
}
default:
if v.Kind() == reflect.Array {
switch v.Type().Elem().Kind() {
case reflect.Uint8:
reflect.Copy(v, reflect.ValueOf(b))
default:
return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
}
break
}
return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
}
return nil
}
func (d *Decoder) decodeBool(b *bool, v reflect.Value) error {
switch v.Kind() {
case reflect.Bool, reflect.Interface:
v.Set(reflect.ValueOf(*b).Convert(v.Type()))
default:
return &UnmarshalTypeError{Value: "bool", Type: v.Type()}
}
return nil
}
func (d *Decoder) decodeBinarySet(bs [][]byte, v reflect.Value) error {
isArray := false
switch v.Kind() {
case reflect.Slice:
// Make room for the slice elements if needed
if v.IsNil() || v.Cap() < len(bs) {
// What about if ignoring nil/empty values?
v.Set(reflect.MakeSlice(v.Type(), 0, len(bs)))
}
case reflect.Array:
// Limited to capacity of existing array.
isArray = true
case reflect.Interface:
set := make([][]byte, len(bs))
for i, b := range bs {
if err := d.decodeBinary(b, reflect.ValueOf(&set[i]).Elem()); err != nil {
return err
}
}
v.Set(reflect.ValueOf(set))
return nil
default:
return &UnmarshalTypeError{Value: "binary set", Type: v.Type()}
}
for i := 0; i < v.Cap() && i < len(bs); i++ {
if !isArray {
v.SetLen(i + 1)
}
u, elem := indirect(v.Index(i), false)
if u != nil {
return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{BS: bs})
}
if err := d.decodeBinary(bs[i], elem); err != nil {
return err
}
}
return nil
}
func (d *Decoder) decodeNumber(n *string, v reflect.Value, fieldTag tag) error {
switch v.Kind() {
case reflect.Interface:
i, err := d.decodeNumberToInterface(n)
if err != nil {
return err
}
v.Set(reflect.ValueOf(i))
return nil
case reflect.String:
if v.Type() == numberType { // Support Number value type
v.Set(reflect.ValueOf(Number(*n)))
return nil
}
v.Set(reflect.ValueOf(*n))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
i, err := strconv.ParseInt(*n, 10, 64)
if err != nil {
return err
}
if v.OverflowInt(i) {
return &UnmarshalTypeError{
Value: fmt.Sprintf("number overflow, %s", *n),
Type: v.Type(),
}
}
v.SetInt(i)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
i, err := strconv.ParseUint(*n, 10, 64)
if err != nil {
return err
}
if v.OverflowUint(i) {
return &UnmarshalTypeError{
Value: fmt.Sprintf("number overflow, %s", *n),
Type: v.Type(),
}
}
v.SetUint(i)
case reflect.Float32, reflect.Float64:
i, err := strconv.ParseFloat(*n, 64)
if err != nil {
return err
}
if v.OverflowFloat(i) {
return &UnmarshalTypeError{
Value: fmt.Sprintf("number overflow, %s", *n),
Type: v.Type(),
}
}
v.SetFloat(i)
default:
if v.Type().ConvertibleTo(timeType) && fieldTag.AsUnixTime {
t, err := decodeUnixTime(*n)
if err != nil {
return err
}
v.Set(reflect.ValueOf(t).Convert(v.Type()))
return nil
}
return &UnmarshalTypeError{Value: "number", Type: v.Type()}
}
return nil
}
func (d *Decoder) decodeNumberToInterface(n *string) (interface{}, error) {
if d.UseNumber {
return Number(*n), nil
}
// Default to float64 for all numbers
return strconv.ParseFloat(*n, 64)
}
func (d *Decoder) decodeNumberSet(ns []*string, v reflect.Value) error {
isArray := false
switch v.Kind() {
case reflect.Slice:
// Make room for the slice elements if needed
if v.IsNil() || v.Cap() < len(ns) {
// What about if ignoring nil/empty values?
v.Set(reflect.MakeSlice(v.Type(), 0, len(ns)))
}
case reflect.Array:
// Limited to capacity of existing array.
isArray = true
case reflect.Interface:
if d.UseNumber {
set := make([]Number, len(ns))
for i, n := range ns {
if err := d.decodeNumber(n, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
return err
}
}
v.Set(reflect.ValueOf(set))
} else {
set := make([]float64, len(ns))
for i, n := range ns {
if err := d.decodeNumber(n, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
return err
}
}
v.Set(reflect.ValueOf(set))
}
return nil
default:
return &UnmarshalTypeError{Value: "number set", Type: v.Type()}
}
for i := 0; i < v.Cap() && i < len(ns); i++ {
if !isArray {
v.SetLen(i + 1)
}
u, elem := indirect(v.Index(i), false)
if u != nil {
return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{NS: ns})
}
if err := d.decodeNumber(ns[i], elem, tag{}); err != nil {
return err
}
}
return nil
}
func (d *Decoder) decodeList(avList []*dynamodb.AttributeValue, v reflect.Value) error {
isArray := false
switch v.Kind() {
case reflect.Slice:
// Make room for the slice elements if needed
if v.IsNil() || v.Cap() < len(avList) {
// What about if ignoring nil/empty values?
v.Set(reflect.MakeSlice(v.Type(), 0, len(avList)))
}
case reflect.Array:
// Limited to capacity of existing array.
isArray = true
case reflect.Interface:
s := make([]interface{}, len(avList))
for i, av := range avList {
if err := d.decode(av, reflect.ValueOf(&s[i]).Elem(), tag{}); err != nil {
return err
}
}
v.Set(reflect.ValueOf(s))
return nil
default:
return &UnmarshalTypeError{Value: "list", Type: v.Type()}
}
// If v is not a slice, array
for i := 0; i < v.Cap() && i < len(avList); i++ {
if !isArray {
v.SetLen(i + 1)
}
if err := d.decode(avList[i], v.Index(i), tag{}); err != nil {
return err
}
}
return nil
}
func (d *Decoder) decodeMap(avMap map[string]*dynamodb.AttributeValue, v reflect.Value) error {
switch v.Kind() {
case reflect.Map:
t := v.Type()
if t.Key().Kind() != reflect.String {
return &UnmarshalTypeError{Value: "map string key", Type: t.Key()}
}
if v.IsNil() {
v.Set(reflect.MakeMap(t))
}
case reflect.Struct:
case reflect.Interface:
v.Set(reflect.MakeMap(stringInterfaceMapType))
v = v.Elem()
default:
return &UnmarshalTypeError{Value: "map", Type: v.Type()}
}
if v.Kind() == reflect.Map {
for k, av := range avMap {
key := reflect.ValueOf(k)
elem := reflect.New(v.Type().Elem()).Elem()
if err := d.decode(av, elem, tag{}); err != nil {
return err
}
v.SetMapIndex(key, elem)
}
} else if v.Kind() == reflect.Struct {
fields := unionStructFields(v.Type(), d.MarshalOptions)
for k, av := range avMap {
if f, ok := fieldByName(fields, k); ok {
fv := fieldByIndex(v, f.Index, func(v *reflect.Value) bool {
v.Set(reflect.New(v.Type().Elem()))
return true // to continue the loop.
})
if err := d.decode(av, fv, f.tag); err != nil {
return err
}
}
}
}
return nil
}
func (d *Decoder) decodeNull(v reflect.Value) error {
if v.IsValid() && v.CanSet() {
v.Set(reflect.Zero(v.Type()))
}
return nil
}
func (d *Decoder) decodeString(s *string, v reflect.Value, fieldTag tag) error {
if fieldTag.AsString {
return d.decodeNumber(s, v, fieldTag)
}
// To maintain backwards compatibility with ConvertFrom family of methods which
// converted strings to time.Time structs
if v.Type().ConvertibleTo(timeType) {
t, err := time.Parse(time.RFC3339, *s)
if err != nil {
return err
}
v.Set(reflect.ValueOf(t).Convert(v.Type()))
return nil
}
switch v.Kind() {
case reflect.String:
v.SetString(*s)
case reflect.Slice:
// To maintain backwards compatibility with the ConvertFrom family of methods
// which converted []byte into base64-encoded strings if the input was typed
if v.Type() == byteSliceType {
decoded, err := base64.StdEncoding.DecodeString(*s)
if err != nil {
return &UnmarshalError{Err: err, Value: "string", Type: v.Type()}
}
v.SetBytes(decoded)
}
case reflect.Interface:
// Ensure type aliasing is handled properly
v.Set(reflect.ValueOf(*s).Convert(v.Type()))
default:
return &UnmarshalTypeError{Value: "string", Type: v.Type()}
}
return nil
}
func (d *Decoder) decodeStringSet(ss []*string, v reflect.Value) error {
isArray := false
switch v.Kind() {
case reflect.Slice:
// Make room for the slice elements if needed
if v.IsNil() || v.Cap() < len(ss) {
v.Set(reflect.MakeSlice(v.Type(), 0, len(ss)))
}
case reflect.Array:
// Limited to capacity of existing array.
isArray = true
case reflect.Interface:
set := make([]string, len(ss))
for i, s := range ss {
if err := d.decodeString(s, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
return err
}
}
v.Set(reflect.ValueOf(set))
return nil
default:
return &UnmarshalTypeError{Value: "string set", Type: v.Type()}
}
for i := 0; i < v.Cap() && i < len(ss); i++ {
if !isArray {
v.SetLen(i + 1)
}
u, elem := indirect(v.Index(i), false)
if u != nil {
return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{SS: ss})
}
if err := d.decodeString(ss[i], elem, tag{}); err != nil {
return err
}
}
return nil
}
func decodeUnixTime(n string) (time.Time, error) {
v, err := strconv.ParseInt(n, 10, 64)
if err != nil {
return time.Time{}, &UnmarshalError{
Err: err, Value: n, Type: timeType,
}
}
return time.Unix(v, 0), nil
}
// indirect will walk a value's interface or pointer value types, returning
// the final value or the value an unmarshaler is defined on.
//
// Based on the encoding/json reflect value type indirection in the Go stdlib:
// https://golang.org/src/encoding/json/decode.go indirect func.
func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
v = v.Addr()
}
for {
if v.Kind() == reflect.Interface && !v.IsNil() {
e := v.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
v = e
continue
}
}
if v.Kind() != reflect.Ptr {
break
}
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
break
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(Unmarshaler); ok {
return u, reflect.Value{}
}
}
v = v.Elem()
}
return nil, v
}
// A Number represents an AttributeValue number literal.
type Number string
// Float64 attempts to cast the number to a float64, returning
// the result of the cast or an error if the cast failed.
func (n Number) Float64() (float64, error) {
return strconv.ParseFloat(string(n), 64)
}
// Int64 attempts to cast the number to an int64, returning
// the result of the cast or an error if the cast failed.
func (n Number) Int64() (int64, error) {
return strconv.ParseInt(string(n), 10, 64)
}
// Uint64 attempts to cast the number to a uint64, returning
// the result of the cast or an error if the cast failed.
func (n Number) Uint64() (uint64, error) {
return strconv.ParseUint(string(n), 10, 64)
}
// String returns the raw number represented as a string
func (n Number) String() string {
return string(n)
}
type emptyOrigError struct{}
func (e emptyOrigError) OrigErr() error {
return nil
}
// An UnmarshalTypeError is an error type representing an error
// unmarshaling the AttributeValue's element to a Go value type.
// Includes details about the AttributeValue type and Go value type.
type UnmarshalTypeError struct {
emptyOrigError
Value string
Type reflect.Type
}
// Error returns the string representation of the error,
// satisfying the error interface.
func (e *UnmarshalTypeError) Error() string {
return fmt.Sprintf("%s: %s", e.Code(), e.Message())
}
// Code returns the code of the error, satisfying the awserr.Error
// interface.
func (e *UnmarshalTypeError) Code() string {
return "UnmarshalTypeError"
}
// Message returns the detailed message of the error, satisfying
// the awserr.Error interface.
func (e *UnmarshalTypeError) Message() string {
return "cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
}
// An InvalidUnmarshalError is an error type representing an invalid type
// encountered while unmarshaling an AttributeValue to a Go value type.
type InvalidUnmarshalError struct {
emptyOrigError
Type reflect.Type
}
// Error returns the string representation of the error,
// satisfying the error interface.
func (e *InvalidUnmarshalError) Error() string {
return fmt.Sprintf("%s: %s", e.Code(), e.Message())
}
// Code returns the code of the error, satisfying the awserr.Error
// interface.
func (e *InvalidUnmarshalError) Code() string {
return "InvalidUnmarshalError"
}
// Message returns the detailed message of the error, satisfying
// the awserr.Error interface.
func (e *InvalidUnmarshalError) Message() string {
if e.Type == nil {
return "cannot unmarshal to nil value"
}
if e.Type.Kind() != reflect.Ptr {
return "cannot unmarshal to non-pointer value, got " + e.Type.String()
}
return "cannot unmarshal to nil value, " + e.Type.String()
}
// An UnmarshalError wraps an error that occurred while unmarshaling a DynamoDB
// AttributeValue element into a Go type. This is different from UnmarshalTypeError
// in that it wraps the underlying error that occurred.
type UnmarshalError struct {
Err error
Value string
Type reflect.Type
}
// Error returns the string representation of the error,
// satisfying the error interface.
func (e *UnmarshalError) Error() string {
return fmt.Sprintf("%s: %s\ncaused by: %v", e.Code(), e.Message(), e.Err)
}
// OrigErr returns the original error that caused this issue.
func (e UnmarshalError) OrigErr() error {
return e.Err
}
// Code returns the code of the error, satisfying the awserr.Error
// interface.
func (e *UnmarshalError) Code() string {
return "UnmarshalError"
}
// Message returns the detailed message of the error, satisfying
// the awserr.Error interface.
func (e *UnmarshalError) Message() string {
return fmt.Sprintf("cannot unmarshal %q into %s.",
e.Value, e.Type.String())
}
| 1 | 9,337 | Enabling `YAML` by default would be a breaking change in behavior for some applications if the structs used by that application already include YAML tags, but the application has been (un)marshaling DynamoDB Attributes based on the struct name. | aws-aws-sdk-go | go |
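A hedged, editor-added illustration of the compatibility concern above (the account struct, field, and key names are assumptions, and no actual SDK option name is implied): a field carrying only a yaml tag is currently marshaled under its Go field name, so honoring YAML tags by default would silently change the stored attribute key for such structs.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)

type account struct {
	// Only a yaml tag is present; dynamodbattribute currently falls back to
	// the Go field name, so the item key written below is "UserID".
	UserID string `yaml:"user_id"`
}

func main() {
	item, err := dynamodbattribute.MarshalMap(account{UserID: "u-42"})
	if err != nil {
		panic(err)
	}
	// If yaml tags were honored by default, this key would become "user_id"
	// and existing items stored under "UserID" would no longer round-trip.
	for k := range item {
		fmt.Println(k)
	}
}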
@@ -131,7 +131,7 @@ public class DriverCommandExecutor extends HttpCommandExecutor implements Closea
Thread.currentThread().interrupt();
throw new WebDriverException("Timed out waiting for driver server to stop.", e);
} finally {
- executorService.shutdownNow();
+ executorService.shutdown();
}
} else { | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.service;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Throwables;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.remote.Command;
import org.openqa.selenium.remote.CommandInfo;
import org.openqa.selenium.remote.DriverCommand;
import org.openqa.selenium.remote.HttpCommandExecutor;
import org.openqa.selenium.remote.Response;
import java.io.Closeable;
import java.io.IOException;
import java.net.ConnectException;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* A specialized {@link HttpCommandExecutor} that will use a {@link DriverService} that lives
* and dies with a single WebDriver session. The service will be restarted upon each new session
 * request and shut down after each quit command.
*/
public class DriverCommandExecutor extends HttpCommandExecutor implements Closeable {
private final DriverService service;
private final ExecutorService executorService = Executors.newFixedThreadPool(2, r -> {
Thread thread = new Thread(r);
thread.setName("Driver Command Executor");
thread.setDaemon(true);
return thread;
});
/**
* Creates a new DriverCommandExecutor which will communicate with the driver as configured
* by the given {@code service}.
*
* @param service The DriverService to send commands to.
*/
public DriverCommandExecutor(DriverService service) {
super(Require.nonNull("DriverService", service.getUrl()));
this.service = service;
}
/**
   * Creates a {@link DriverCommandExecutor} that supports non-standard
* {@code additionalCommands} in addition to the standard.
*
* @param service driver server
* @param additionalCommands additional commands the remote end can process
*/
protected DriverCommandExecutor(
DriverService service, Map<String, CommandInfo> additionalCommands) {
super(additionalCommands, service.getUrl());
this.service = service;
}
/**
* Sends the {@code command} to the driver server for execution. The server will be started
   * if requesting a new session. Likewise, if terminating a session, the server will be shut down
* once a response is received.
*
* @param command The command to execute.
* @return The command response.
* @throws IOException If an I/O error occurs while sending the command.
*/
@Override
public Response execute(Command command) throws IOException {
boolean newlyStarted = false;
if (DriverCommand.NEW_SESSION.equals(command.getName())) {
boolean wasRunningBefore = service.isRunning();
service.start();
newlyStarted = !wasRunningBefore && service.isRunning();
}
if (DriverCommand.QUIT.equals(command.getName())) {
CompletableFuture<Response> commandComplete = CompletableFuture.supplyAsync(() -> {
try {
return invokeExecute(command);
} catch (Throwable t) {
Throwable rootCause = Throwables.getRootCause(t);
if (rootCause instanceof IllegalStateException
&& "Closed".equals(rootCause.getMessage())) {
return null;
}
if (rootCause instanceof ConnectException
&& "Connection refused".equals(rootCause.getMessage())) {
throw new WebDriverException("The driver server has unexpectedly died!", t);
}
Throwables.throwIfUnchecked(t);
throw new WebDriverException(t);
}
}, executorService);
CompletableFuture<Response> processFinished = CompletableFuture.supplyAsync(() -> {
service.process.waitFor(service.getTimeout().toMillis());
return null;
}, executorService);
try {
Response response = (Response) CompletableFuture.anyOf(commandComplete, processFinished)
.get(service.getTimeout().toMillis() * 2, TimeUnit.MILLISECONDS);
service.stop();
return response;
} catch (ExecutionException | TimeoutException e) {
throw new WebDriverException("Timed out waiting for driver server to stop.", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new WebDriverException("Timed out waiting for driver server to stop.", e);
} finally {
executorService.shutdownNow();
}
} else {
try {
return invokeExecute(command);
} catch (Throwable t) {
Throwable rootCause = Throwables.getRootCause(t);
if (rootCause instanceof ConnectException &&
"Connection refused".equals(rootCause.getMessage()) &&
!service.isRunning()) {
throw new WebDriverException("The driver server has unexpectedly died!", t);
}
// an attempt to execute a command in the newly started driver server has failed
// hence need to stop it
if (newlyStarted && service.isRunning()) {
try {
service.stop();
} catch (Exception ignored) {
// fall through
}
}
Throwables.throwIfUnchecked(t);
throw new WebDriverException(t);
}
}
}
@VisibleForTesting
Response invokeExecute(Command command) throws IOException {
return super.execute(command);
}
@Override
public void close() {
executorService.shutdownNow();
}
}
| 1 | 19,373 | For the command executor, which in turn uses the HTTP client to talk to the WebDriver, the client might have high timeout values set, so the shutdown can take a long time if we wait for it to complete, especially if multiple long-running threads are there. I think it might be a good idea in general to couple `shutdown()` with `awaitTermination()` with a timeout; if the ongoing tasks do not complete within that timeout, then call `shutdownNow()`. What do you think? | SeleniumHQ-selenium | py |
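A minimal, editor-added sketch of the pattern suggested above (hedged: the five-second timeout and the helper name are assumptions, not Selenium's actual implementation): shut down gracefully first, wait briefly for in-flight tasks, and only force termination if the deadline passes.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

final class GracefulShutdownSketch {

  private GracefulShutdownSketch() {}

  static void shutdownGracefully(ExecutorService executorService) {
    executorService.shutdown(); // stop accepting new tasks, let running ones finish
    try {
      if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
        executorService.shutdownNow(); // interrupt tasks that ignored the deadline
      }
    } catch (InterruptedException e) {
      executorService.shutdownNow();
      Thread.currentThread().interrupt();
    }
  }
}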
@@ -38,7 +38,7 @@ public abstract class HiveMetastoreTest {
@BeforeClass
public static void startMetastore() throws Exception {
HiveMetastoreTest.metastore = new TestHiveMetastore();
- metastore.start();
+ metastore.start(new HiveConf(HiveMetastoreTest.class));
HiveMetastoreTest.hiveConf = metastore.hiveConf();
HiveMetastoreTest.metastoreClient = new HiveMetaStoreClient(hiveConf);
String dbPath = metastore.getDatabasePath(DB_NAME); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import java.util.HashMap;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public abstract class HiveMetastoreTest {
protected static final String DB_NAME = "hivedb";
protected static HiveMetaStoreClient metastoreClient;
protected static HiveCatalog catalog;
protected static HiveConf hiveConf;
protected static TestHiveMetastore metastore;
@BeforeClass
public static void startMetastore() throws Exception {
HiveMetastoreTest.metastore = new TestHiveMetastore();
metastore.start();
HiveMetastoreTest.hiveConf = metastore.hiveConf();
HiveMetastoreTest.metastoreClient = new HiveMetaStoreClient(hiveConf);
String dbPath = metastore.getDatabasePath(DB_NAME);
Database db = new Database(DB_NAME, "description", dbPath, new HashMap<>());
metastoreClient.createDatabase(db);
HiveMetastoreTest.catalog = new HiveCatalog(hiveConf);
}
@AfterClass
public static void stopMetastore() {
catalog.close();
HiveMetastoreTest.catalog = null;
metastoreClient.close();
HiveMetastoreTest.metastoreClient = null;
metastore.stop();
HiveMetastoreTest.metastore = null;
}
}
| 1 | 32,289 | Is this change needed? `start()` is still defined and uses `HiveMetastoreTest.class`. The only difference is that this doesn't pass a `Configuration` and the parameterless `start` passes `new Configuration()`. | apache-iceberg | java |
@@ -27,6 +27,7 @@ type stakingCommand struct {
stakingV1 Protocol
stakingV2 *staking.Protocol
candIndexer *CandidateIndexer
+ sr protocol.StateReader
}
// NewStakingCommand creates a staking command center to manage staking committee and new native staking | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package poll
import (
"context"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/staking"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/state"
)
type stakingCommand struct {
addr address.Address
hu config.HeightUpgrade
stakingV1 Protocol
stakingV2 *staking.Protocol
candIndexer *CandidateIndexer
}
// NewStakingCommand creates a staking command center to manage staking committee and new native staking
func NewStakingCommand(
hu config.HeightUpgrade,
candIndexer *CandidateIndexer,
stkV1 Protocol,
stkV2 *staking.Protocol,
) (Protocol, error) {
h := hash.Hash160b([]byte(protocolID))
addr, err := address.FromBytes(h[:])
if err != nil {
return nil, err
}
sc := stakingCommand{
hu: hu,
addr: addr,
stakingV1: stkV1,
stakingV2: stkV2,
candIndexer: candIndexer,
}
if stkV1 == nil && stkV2 == nil {
return nil, errors.New("empty staking protocol")
}
return &sc, nil
}
func (sc *stakingCommand) CreateGenesisStates(ctx context.Context, sm protocol.StateManager) error {
if sc.stakingV1 != nil {
if err := sc.stakingV1.CreateGenesisStates(ctx, sm); err != nil {
return err
}
}
if sc.stakingV2 != nil {
if err := sc.stakingV2.CreateGenesisStates(ctx, sm); err != nil {
return err
}
}
return nil
}
func (sc *stakingCommand) Start(ctx context.Context) error {
if sc.stakingV1 != nil {
if starter, ok := sc.stakingV1.(lifecycle.Starter); ok {
return starter.Start(ctx)
}
}
return nil
}
func (sc *stakingCommand) CreatePreStates(ctx context.Context, sm protocol.StateManager) error {
// TODO: handle V2
return sc.stakingV1.CreateGenesisStates(ctx, sm)
}
func (sc *stakingCommand) CreatePostSystemActions(ctx context.Context) ([]action.Envelope, error) {
return createPostSystemActions(ctx, sc)
}
func (sc *stakingCommand) Handle(ctx context.Context, act action.Action, sm protocol.StateManager) (*action.Receipt, error) {
if sc.stakingV1 == nil {
return handle(ctx, act, sm, sc.candIndexer, sc.addr.String())
}
if sc.stakingV2 == nil {
return sc.stakingV1.Handle(ctx, act, sm)
}
// transition to V2 starting Fairbank
height, err := sm.Height()
if err != nil {
return nil, err
}
if sc.hu.IsPost(config.Fairbank, height) {
return handle(ctx, act, sm, sc.candIndexer, sc.addr.String())
}
return sc.stakingV1.Handle(ctx, act, sm)
}
func (sc *stakingCommand) Validate(ctx context.Context, act action.Action) error {
return validate(ctx, sc, act)
}
func (sc *stakingCommand) CalculateCandidatesByHeight(ctx context.Context, height uint64) (state.CandidateList, error) {
if sc.stakingV1 == nil {
return sc.stakingV2.ActiveCandidates(ctx)
}
if sc.stakingV2 == nil {
return sc.stakingV1.CalculateCandidatesByHeight(ctx, height)
}
// transition to V2 starting Fairbank
if sc.hu.IsPost(config.Fairbank, height) {
return sc.stakingV2.ActiveCandidates(ctx)
}
return sc.stakingV1.CalculateCandidatesByHeight(ctx, height)
}
// DelegatesByEpoch returns exact number of delegates according to epoch number
func (sc *stakingCommand) DelegatesByEpoch(ctx context.Context, epochNum uint64) (state.CandidateList, error) {
// TODO: handle V2
return sc.stakingV1.DelegatesByEpoch(ctx, epochNum)
}
// CandidatesByHeight returns candidate list from state factory according to height
func (sc *stakingCommand) CandidatesByHeight(ctx context.Context, height uint64) (state.CandidateList, error) {
// TODO: handle V2
return sc.stakingV1.CandidatesByHeight(ctx, height)
}
func (sc *stakingCommand) ReadState(ctx context.Context, sr protocol.StateReader, method []byte, args ...[]byte) ([]byte, error) {
// TODO: handle V2
return sc.stakingV1.ReadState(ctx, sr, method, args...)
}
// Register registers the protocol with a unique ID
func (sc *stakingCommand) Register(r *protocol.Registry) error {
return r.Register(protocolID, sc)
}
// ForceRegister registers the protocol with a unique ID and force replacing the previous protocol if it exists
func (sc *stakingCommand) ForceRegister(r *protocol.Registry) error {
return r.ForceRegister(protocolID, sc)
}
| 1 | 21,548 | I think it is weird to store sr in protocol struct | iotexproject-iotex-core | go |
@@ -7308,6 +7308,16 @@ NATable * NATableDB::get(const ExtendedQualName* key, BindWA* bindWA, NABoolean
}
}
+ // the reload cqd will be set during aqr after compiletime and runtime
+ // timestamp mismatch is detected.
+ // If set, reload hive metadata.
+ if ((cachedNATable->isHiveTable()) &&
+ (CmpCommon::getDefault(HIVE_DATA_MOD_CHECK) == DF_ON) &&
+ (CmpCommon::getDefault(TRAF_RELOAD_NATABLE_CACHE) == DF_ON))
+ {
+ removeEntry = TRUE;
+ }
+
//Found in cache. If that's all the caller wanted, return now.
if ( !removeEntry && findInCacheOnly )
return cachedNATable; | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
**************************************************************************
*
* File: NATable.C
* Description: A Non-Alcoholic table
* Created: 4/27/94
* Language: C++
*
*
**************************************************************************
*/
#define SQLPARSERGLOBALS_FLAGS // must precede all #include's
#undef _DP2NT_
#define _DP2NT_
// #define NA_ARKFS
#define __ROSETTA
#undef _DP2NT_
// #undef NA_ARKFS
#undef __ROSETTA
#include "NATable.h"
#include "Sqlcomp.h"
#include "Const.h"
#include "desc.h"
#include "dfs2rec.h"
#include "hs_read.h"
#include "parser.h"
#include "BindWA.h"
#include "ComAnsiNamePart.h"
#include "ItemColRef.h"
#include "ItemFunc.h"
#include "ItemOther.h"
#include "PartFunc.h"
#include "EncodedValue.h"
#include "SchemaDB.h"
#include "NAClusterInfo.h"
#include "MVInfo.h"
#include "ComMPLoc.h"
#include "NATable.h"
#include "opt.h"
#include "CmpStatement.h"
#include "ControlDB.h"
#include "ComCextdecs.h"
#include "ComSysUtils.h"
#include "ComObjectName.h"
#include "SequenceGeneratorAttributes.h"
#include "security/uid.h"
#include "HDFSHook.h"
#include "ExpLOBexternal.h"
#include "ComCextdecs.h"
#include "ExpHbaseInterface.h"
#include "CmpSeabaseDDL.h"
#include "RelScan.h"
#include "exp_clause_derived.h"
#include "PrivMgrCommands.h"
#include "ComDistribution.h"
#include "ExExeUtilCli.h"
#include "CmpDescribe.h"
#include "Globals.h"
#include "ComUser.h"
#include "ComSmallDefs.h"
#include "CmpMain.h"
#define MAX_NODE_NAME 9
#include "SqlParserGlobals.h"
//#define __ROSETTA
//#include "rosetta_ddl_include.h"
#include "SqlParserGlobals.h"
extern desc_struct *generateSpecialDesc(const CorrName& corrName);
#include "CmpMemoryMonitor.h"
#include "OptimizerSimulator.h"
#include "SQLCLIdev.h"
#include "sql_id.h"
SQLMODULE_ID __SQL_mod_natable = {
/* version */ SQLCLI_CURRENT_VERSION,
/* module name */ "HP_SYSTEM_CATALOG.SYSTEM_SCHEMA.READDEF_N29_000",
/* time stamp */ 866668761818000LL,
/* char set */ "ISO88591",
/* name length */ 47
};
// -----------------------------------------------------------------------
// skipLeadingBlanks()
// Examines the given string keyValueBuffer from 'startIndex' for
// 'length' bytes and skips any blanks that appear as a prefix of the
// first non-blank character.
// -----------------------------------------------------------------------
Int64 HistogramsCacheEntry::getLastUpdateStatsTime()
{
return cmpCurrentContext->getLastUpdateStatsTime();
}
void HistogramsCacheEntry::setUpdateStatsTime(Int64 updateTime)
{
cmpCurrentContext->setLastUpdateStatsTime(updateTime);
}
static Int64 getCurrentTime()
{
// GETTIMEOFDAY returns -1, in case of an error
Int64 currentTime;
TimeVal currTime;
if (GETTIMEOFDAY(&currTime, 0) != -1)
currentTime = currTime.tv_sec;
else
currentTime = 0;
return currentTime;
}
void HistogramsCacheEntry::updateRefreshTime()
{
Int64 currentTime = getCurrentTime();
this->setRefreshTime(currentTime);
}
static Lng32 skipLeadingBlanks(const char * keyValueBuffer,
const Lng32 startIndex,
const Lng32 length)
{
Lng32 newIndex = startIndex;
Lng32 stopIndex = newIndex + length;
// Localize the search for blanks between the startIndex and stopIndex.
while ((newIndex <= stopIndex) AND (keyValueBuffer[newIndex] == ' '))
newIndex++;
return newIndex;
} // static skipLeadingBlanks()
// -----------------------------------------------------------------------
// skipTrailingBlanks()
// Examines the given string keyValueBuffer from startIndex down through
// 0 and skips any blanks that appear as a suffix of the first non-blank
// character.
// -----------------------------------------------------------------------
static Lng32 skipTrailingBlanks(const char * keyValueBuffer,
const Lng32 startIndex)
{
Lng32 newIndex = startIndex;
while ((newIndex >= 0) AND (keyValueBuffer[newIndex] == ' '))
newIndex--;
return newIndex;
} // static skipTrailingBlanks
//----------------------------------------------------------------------
// qualNameHashFunc()
// calculates a hash value given a QualifiedName.Hash value is mod by
// the hashTable size in HashDictionary.
//----------------------------------------------------------------------
ULng32 qualNameHashFunc(const QualifiedName& qualName)
{
ULng32 index = 0;
const NAString& name = qualName.getObjectName();
for(UInt32 i=0;i<name.length();i++)
{
index += (ULng32) (name[i]);
}
return index;
}
//-------------------------------------------------------------------------
//constructor() for HistogramCache
//-------------------------------------------------------------------------
HistogramCache::HistogramCache(NAMemory * heap,Lng32 initSize)
: heap_(heap),
hits_(0),
lookups_(0),
memoryLimit_(33554432),
lruQ_(heap), tfd_(NULL), mfd_(NULL), size_(0)
{
//create the actual cache
HashFunctionPtr hashFunc = (HashFunctionPtr)(&qualNameHashFunc);
histogramsCache_ = new (heap_)
NAHashDictionary<QualifiedName,HistogramsCacheEntry>
(hashFunc,initSize,TRUE,heap_);
}
//reset all entries to not accessedInCurrentStatement
void HistogramCache::resetAfterStatement()
{
for (CollIndex x=lruQ_.entries(); x>0; x--)
{
if (lruQ_[x-1]->accessedInCurrentStatement())
lruQ_[x-1]->resetAfterStatement();
}
}
//-------------------------------------------------------------------------
//invalidate what is in the cache
//-------------------------------------------------------------------------
void HistogramCache::invalidateCache()
{
while (lruQ_.entries())
{
HistogramsCacheEntry* entry = lruQ_[0];
deCache(&entry);
}
histogramsCache_->clearAndDestroy();
lruQ_.clear();
}
//--------------------------------------------------------------------------
// HistogramCache::getCachedHistogram()
// Looks for the histogram in the cache; if it is there, it makes a deep copy
// of it on the statementHeap() and returns it. If the histogram is not in
// the cache, then it fetches the histogram and makes a deep copy of it on the
// context heap to store it in the hash table.
//--------------------------------------------------------------------------
#pragma nowarn(770) // warning elimination
void HistogramCache::getHistograms(NATable& table)
{
const QualifiedName& qualifiedName = table.getFullyQualifiedGuardianName();
ExtendedQualName::SpecialTableType type = table.getTableType();
const NAColumnArray& colArray = table.getNAColumnArray();
StatsList& colStatsList = *(table.getColStats());
const Int64& redefTime = table.getRedefTime();
Int64& statsTime = const_cast<Int64&>(table.getStatsTime());
//1//
//This 'flag' is set to NULL if FetchHistogram has to be called to
//get the statistics in case
//1. If a table's histograms are not in the cache
//2. If some kind of timestamp mismatch occurs and therefore the
// cached histogram has to be refreshed from disk.
//Pointer to cache entry for histograms on this table
HistogramsCacheEntry * cachedHistograms = NULL;
// skip reading the histograms if they have not been changed in last
// CACHE_HISTOGRAMS_REFRESH_INTERVAL hours
NABoolean skipRead = FALSE;
//Do we need to use the cache
//Depends on :
//1. If histogram caching is ON
//2. If the table is a normal table
if(CURRSTMT_OPTDEFAULTS->cacheHistograms() &&
type == ExtendedQualName::NORMAL_TABLE)
{ //2//
// Do we have cached histograms for this table
// look up the cache and get a reference to statistics for this table
cachedHistograms = lookUp(table);
    // The first thing to check is whether the table whose histograms are cached
    // has been updated
if (cachedHistograms && (cachedHistograms->getRedefTime() != redefTime))
{
deCache(&cachedHistograms);
}
// If the histograms exist in the cache, then we want to avoid reading
// timestamps, if the histograms have not been updated in last default
// refresh time (CACHE_HISTOGRAMS_REFRESH_INTERVAL) or if the histograms in the cache
// are less than CACHE_HISTOGRAMS_REFRESH_INTERVAL old.
Int64 lastRefTimeDef, lastFakeRefTimeDef, currentTime;
if (cachedHistograms)
{
lastRefTimeDef = uint32ToInt64(CURRSTMT_OPTDEFAULTS->defRefTime());
lastFakeRefTimeDef = uint32ToInt64(CURRSTMT_OPTDEFAULTS->defFakeRefTime());
currentTime = getCurrentTime();
Int64 histLastRefreshedTime = cachedHistograms->getRefreshTime();
if (currentTime && cachedHistograms->isAllStatsFake())
{
// Check if it has been more than 'lastFakeRefTimeDef' secs
// (equal to CQD HIST_NO_STATS_REFRESH_INTERVAL) since histograms have
// been checked OR if update statistics automation is ON and it has
// been more than 'lastFakeRefTimeDef'/360 (should = 10 by default).
Int64 timeSinceLastHistRefresh = currentTime - histLastRefreshedTime;
if(!CURRSTMT_OPTDEFAULTS->ustatAutomation() && timeSinceLastHistRefresh > lastFakeRefTimeDef ||
CURRSTMT_OPTDEFAULTS->ustatAutomation() && timeSinceLastHistRefresh > lastFakeRefTimeDef/360)
{
//the histograms are in the cache but we need to re-read them because
//their default values might have been re-estimated
deCache(&cachedHistograms);
}
}
// Histograms are not fake. Check to see if we need to do anymore timestamp checks
if (currentTime && cachedHistograms && lastRefTimeDef > 0)
{
Int64 lastUpdateStatsTime = HistogramsCacheEntry::getLastUpdateStatsTime();
if ((lastUpdateStatsTime != -1) &&
((currentTime - lastUpdateStatsTime) < lastRefTimeDef))
{
// Last known update stats time for this table occurred less than
// CACHE_HISTOGRAMS_REFRESH_INTERVAL secs ago.
if (lastUpdateStatsTime < histLastRefreshedTime)
{
// Last time the histograms cache was refreshed for this table is newer
// than last known update stats time. Skip read of hists.
skipRead = TRUE;
}
}
else
// No update stats time recorded OR last known update stats time occurred
// more than CACHE_HISTOGRAMS_REFRESH_INTERVAL secs ago.
if ((currentTime - histLastRefreshedTime) < lastRefTimeDef)
// Histograms were refreshed less than CACHE_REFRESH_HISTOGRAMS_INTERVAL
// secs ago. Skip read of hists.
skipRead = TRUE;
}
}
//assumption:
//if tempHist is not NULL then it should have a pointer to full Histograms
//check if histogram preFetching is on
if(CURRSTMT_OPTDEFAULTS->preFetchHistograms() && cachedHistograms)
{ //3//
//we do need to preFetch histograms
if(!cachedHistograms->preFetched())
{ //4//
//preFetching is on but these histograms
//were not preFetched so delete them and
//re-Read them
deCache(&cachedHistograms);
} //4//
} //3//
//Check if there is a timestamp mis-match
if(cachedHistograms AND cachedHistograms->getRedefTime() != redefTime)
{ //5//
//the histograms are in the cache but we need to re-read them because of
//a time stamp mismatch
deCache(&cachedHistograms);
} //5//
else if (!skipRead)
{ //6//
//Do some more timestamp calculations and set re-Read flag if
//there is a mis-match
if(cachedHistograms)
{ //9 //
// Check when the histogram table was last modified. If this time doesn't equal
// the modification time of the cached histograms, OR this time is more than
// lastRefTimeDef secs old, call FetchStatsTime to read STATS_TIME field of
// the actual histogram. The last condition here is used to force a call of
// FetchStatsTime() after awhile. This is for update stats automation:
// FetchStatsTime() will update the READ_TIME field of the histogram.
Int64 modifTime;
Int64 currentJulianTime = NA_JulianTimestamp();
GetHSModifyTime(qualifiedName, type, modifTime, FALSE);
Int64 readCntInterval = (Int64)CmpCommon::getDefaultLong(USTAT_AUTO_READTIME_UPDATE_INTERVAL);
if (modifTime != 0)
// If the HISTOGRAMS table was modified since the last time FetchStatsTime()
// called and the time is not the same as the cached histograms OR
// if it was modified more than READTIME_UPDATE_INTERVAL secs ago and
// ustat automation is ON:
if (cachedHistograms->getModifTime() != modifTime ||
(currentJulianTime - modifTime > readCntInterval*1000000 &&
CmpCommon::getDefaultLong(USTAT_AUTOMATION_INTERVAL) > 0))
{ //10//
FetchStatsTime(qualifiedName,type,colArray,statsTime,FALSE);
cachedHistograms->updateRefreshTime();
// If ustat automation is on, FetchStatsTime will modify the HISTOGRAMS table.
// So, the new modification time of the HISTOGRAMS table must be saved to the
// cached histograms when automation is on, so that only changes to HISTOGRAMS
// by update stats cause the above 'if' to be TRUE.
if (CmpCommon::getDefaultLong(USTAT_AUTOMATION_INTERVAL) > 0)
{
GetHSModifyTime(qualifiedName, type, modifTime, FALSE);
cachedHistograms->setModifTime(modifTime);
}
if (cachedHistograms->getStatsTime() != statsTime)
{ //11//
deCache(&cachedHistograms);
} //11//
} //10//
} //9//
} //6//
} //2//
if( cachedHistograms )
{
hits_++;
}
else
{
lookups_++;
}
//retrieve the statistics for the table in colStatsList
createColStatsList(table, cachedHistograms);
//if not using histogram cache, then invalidate cache
if(!CURRSTMT_OPTDEFAULTS->cacheHistograms())
invalidateCache();
} //1//
#pragma warn(770) // warning elimination
//----------------------------------------------------------------------------
// HistogramCache::createColStatsList()
// This method actually puts the statistics for columns that require statistics
// into colStatsList.
// 1. If reRead is false meaning that the table's statistics exist in the cache,
// then this method gets statistics from the cache and copies them into
// colStatsList. If statistics for some columns are not found in the cache, then
// this method calls FetchHistograms to get statistics for these columns. It
// then puts these missing statistics into the cache, then copies the statistics
// from the cache into colStatsList
// 2. If reRead is true meaning that we need to get statistics from disk via
// FetchHistograms. reRead can be true for any of the following cases:
// a. Histogram Caching is on but we updated statistics since we last read them
// so we have deleted the old statistics and we need to read the tables
// statistics again from disk.
// 3. If histograms are being Fetched on demand meaning that histogram caching is off,
// then this method will fetch statistics into colStatsList using FetchHistograms.
//
// Now that we also have the option of reducing the number of intervals in histograms
// this method also factors that in.
//
// Each entry of the colArray contains information about a column that tells
// us what kind of histogram is required by that column. The decision on what
// kind of histogram is required for a column is based on the following factors:
//
// 1. A column that is not referenced and is not an index/primary key column
// does not need a histogram.
//
// 2. A column that is an index/primary key or is referenced in the query but is not
// part of a predicate, group by, or order by clause requires a compressed histogram.
// A full histogram can be altered to make it seem like a compressed histogram.
//
// 3. Columns that are part of a predicate or are in an order by or group by clause
// require full histograms and are referencedForHistogram. Only a full histogram
// can satisfy the requirement for a full histogram.
//
// Just for the sake of reiterating the main point:
// Columns that are referencedForHistogram need full histograms.
// Columns that are just referenced or are index/primary key columns only require a
// compressed histogram.
//----------------------------------------------------------------------------
void HistogramCache::createColStatsList
(NATable& table, HistogramsCacheEntry* cachedHistograms)
{
StatsList& colStatsList = *(table.getColStats());
NAColumnArray& colArray = const_cast<NAColumnArray&>
(table.getNAColumnArray());
const QualifiedName& qualifiedName = table.getFullyQualifiedGuardianName();
ExtendedQualName::SpecialTableType type = table.getTableType();
const Int64& redefTime = table.getRedefTime();
Int64& statsTime = const_cast<Int64&>(table.getStatsTime());
// The singleColsFound is used to prevent stats from being inserted
// more than once in the output list.
  ColumnSet singleColsFound(STMTHEAP);
  //"lean" cachedHistograms are in the context heap.
//colStatsList is in the statement heap.
//The context heap persists for the life of this mxcmp.
//The statement heap is deleted at end of a compilation.
//getStatsListFromCache will expand "lean" cachedHistograms
//into "fat" colStatsList.
//this points to the stats list
//that is used to fetch statistics
//that are not in the cache
StatsList * statsListForFetch=NULL;
// Used to count the number of columns
// whose histograms are in the cache.
UInt32 coveredList = 0;
//Do we need to use the cache
//Depends on :
//1. If histogram caching is ON
//2. If the table is a normal table
if(cachedHistograms && (CURRSTMT_OPTDEFAULTS->cacheHistograms() &&
type == ExtendedQualName::NORMAL_TABLE))
{
// getStatsListFromCache will unmark columns that have statistics
// in cachedHistograms. All columns whose statistics are not in
// cachedHistogram are still marked as needing histograms.
// This is then passed into FetchHistograms, which will
// return statistics for columns marked as needing histograms.
// colArray tells getStatsListFromCache what columns need
// histograms. getStatsListFromCache uses colArray to tell
// us what columns were not found in cachedHistograms.
// get statistics from cachedHistograms into list.
// colArray has the columns whose histograms we need.
coveredList = getStatsListFromCache
(colStatsList, colArray, cachedHistograms, singleColsFound);
}
Int64 modifTime = 0;
// set to TRUE if all columns in the table have default statistics
NABoolean allFakeStats = TRUE;
//if some of the needed statistics were not found in the cache
//then call FetchHistograms to get those statistics
if (colArray.entries() > coveredList)
{
//this is the stats list into which statistics will be fetched
statsListForFetch = &colStatsList;
if(CURRSTMT_OPTDEFAULTS->cacheHistograms() &&
type == ExtendedQualName::NORMAL_TABLE)
{
//if histogram caching is on and not all histograms where found in the cache
//then create a new stats list object to get histograms that were missing
statsListForFetch = new(CmpCommon::statementHeap())
StatsList(CmpCommon::statementHeap(),2*colArray.entries());
}
//set pre-fetching to false by default
NABoolean preFetch = FALSE;
//turn prefetching on if caching is on and
//we want to prefetch histograms
if(CURRSTMT_OPTDEFAULTS->cacheHistograms() &&
CURRSTMT_OPTDEFAULTS->preFetchHistograms() &&
(type == ExtendedQualName::NORMAL_TABLE))
preFetch = TRUE;
    // flag the unique columns so the uec can be set correctly,
    // especially in the case of columns with fake stats
for (CollIndex j = 0; j < colArray.entries(); j++)
{
NAList<NAString> keyColList(STMTHEAP, 1);
NAColumn *col = colArray[j];
if (!col->isUnique())
{
const NAString &colName = col->getColName();
keyColList.insert(colName);
// is there a unique index on this column?
if (col->needHistogram () &&
table.getCorrespondingIndex(keyColList, // input columns
TRUE, // look for explicit index
TRUE, // look for unique index
FALSE, // look for primary key
FALSE, // look for any index or primary key
FALSE, // sequence of cols doesn't matter
FALSE, // don't exclude computed cols
NULL // index name
))
col->setIsUnique();
}
}
FetchHistograms(qualifiedName,
type,
(colArray),
(*statsListForFetch),
FALSE,
CmpCommon::statementHeap(),
modifTime,
statsTime,
allFakeStats,//set to TRUE if all columns have default stats
preFetch,
(Int64) CURRSTMT_OPTDEFAULTS->histDefaultSampleSize()
);
}
//check if we are using the cache
if(CURRSTMT_OPTDEFAULTS->cacheHistograms() &&
type == ExtendedQualName::NORMAL_TABLE)
{
//we are using the cache but did we already
//have the statistics in cache
if(cachedHistograms)
{
// yes some of the statistics where already in cache
// Did we find statistics in the cache for all the columns
// whose statistics we needed?
if (colArray.entries() > coveredList)
{
// not all the required statistics were in the cache,
// some statistics were missing from the cache entry.
// therefore must have done a FetchHistograms to get
// the missing histograms. Now update the cache entry
// by adding the missing histograms that were just fetched
ULng32 histCacheHeapSize = heap_->getAllocSize();
cachedHistograms->addToCachedEntry(colArray,(*statsListForFetch));
ULng32 entrySizeGrowth = (heap_->getAllocSize() - histCacheHeapSize);
ULng32 entrySize = cachedHistograms->getSize() + entrySizeGrowth;
cachedHistograms->setSize(entrySize);
size_ += entrySizeGrowth;
//get statistics from the cache that where missing from the
//cache earlier and have since been added to the cache
coveredList = getStatsListFromCache
(colStatsList, colArray, cachedHistograms, singleColsFound);
}
}
else
{
CMPASSERT(statsListForFetch);
// used the cache but had to re-read
// all the table's histograms from disk
// put the re-read histograms into cache
putStatsListIntoCache((*statsListForFetch), colArray, qualifiedName,
modifTime, statsTime, redefTime, allFakeStats);
// look up the cache and get a reference to statistics for this table
cachedHistograms = lookUp(table);
// get statistics from the cache
coveredList = getStatsListFromCache
(colStatsList, colArray, cachedHistograms, singleColsFound);
}
}
if(CURRSTMT_OPTDEFAULTS->reduceBaseHistograms())
colStatsList.reduceNumHistIntsAfterFetch(table);
//clean up
if(statsListForFetch != &colStatsList)
delete statsListForFetch;
// try to decache any old entries if we're over the memory limit
if(CURRSTMT_OPTDEFAULTS->cacheHistograms())
{
enforceMemorySpaceConstraints();
}
traceTable(table);
}
//------------------------------------------------------------------------
//HistogramCache::getStatsListFromCache()
//gets the StatsList into list from cachedHistograms and
//returns the number of columns whose statistics were
//found in the cache. The columns whose statistics are required
//are passed in through colArray.
//------------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
Int32 HistogramCache::getStatsListFromCache
( StatsList& list, //In \ Out
NAColumnArray& colArray, //In
HistogramsCacheEntry* cachedHistograms, // In
ColumnSet& singleColsFound) //In \ Out
{
// cachedHistograms points to the memory-efficient contextheap
// representation of table's histograms.
// list points to statementheap list container that caller is
// expecting us to fill-in with ColStats required by colArray.
// counts columns whose histograms are in cache or not needed
UInt32 columnsCovered = 0;
// Collect the mc stats with this temporary list. If the
// mc stats objects are stored in the middle of the output 'list',
// IndexDescHistograms::appendHistogramForColumnPosition() will
// abort, because "There must be a ColStatDesc for every key column!".
StatsList mcStatsList(CmpCommon::statementHeap());
//iterate over all the columns in the colArray
for(UInt32 i=0;i<colArray.entries();i++)
{
//get a reference to the column
NAColumn * column = colArray[i];
//get the position of the column in the table
CollIndex colPos = column->getPosition();
// singleColsFound is used to prevent stats from
// being inserted more than once in the output list.
if (singleColsFound.contains(colPos))
{
columnsCovered++;
continue;
}
NABoolean columnNeedsHist = column->needHistogram();
NABoolean columnNeedsFullHist = column->needFullHistogram();
// Did histograms for this column get added
NABoolean colAdded = FALSE;
if (NOT columnNeedsHist)
{
//if the column was marked as not needing any histogram
//then increment columnsCovered & skip to next column, as neither
//single interval nor full histograms are required for this column.
columnsCovered++;
}
else if (cachedHistograms->contains(colPos) AND columnNeedsHist)
{
//we have full histograms for this column
columnsCovered++;
colAdded = TRUE;
//set flag in column not to fetch histogram
//the histogram is already in cache
column->setDontNeedHistogram();
NABoolean copyIntervals=TRUE;
ColStatsSharedPtr const singleColStats =
cachedHistograms->getHistForCol(*column);
if (NOT columnNeedsFullHist)
{
//full histograms are not required. get single interval histogram
//from the full histogram and insert it into the user's statslist
copyIntervals=FALSE;
}
//since we've tested containment, we are guaranteed to get a
//non-null histogram for column
list.insertAt
(list.entries(),
ColStats::deepCopySingleColHistFromCache
(*singleColStats, *column, list.heap(), copyIntervals));
}
//Assumption: a multi-column histogram is retrieved when
//histograms for any of its columns are retrieved.
if (columnNeedsHist)
{
// insert all multicolumns referencing column
// use singleColsFound to avoid duplicates
cachedHistograms->getMCStatsForColFromCacheIntoList
(mcStatsList, *column, singleColsFound);
}
// if column was added, then add it to the duplist
if (colAdded) singleColsFound += colPos;
}
  // append the mc stats at the end of the output list.
for (Lng32 i=0; i<mcStatsList.entries(); i++ ) {
list.insertAt(list.entries(), mcStatsList[i]);
}
return columnsCovered;
}
#pragma warn(1506) // warning elimination
//this method is used to put into the cache stats lists that
//needed to be re-read or were not in the cache
void HistogramCache::putStatsListIntoCache(StatsList & colStatsList,
const NAColumnArray& colArray,
const QualifiedName & qualifiedName,
Int64 modifTime,
Int64 statsTime,
const Int64 & redefTime,
NABoolean allFakeStats)
{
ULng32 histCacheHeapSize = heap_->getAllocSize();
// create memory efficient representation of colStatsList
HistogramsCacheEntry * histogramsForCache = new (heap_)
HistogramsCacheEntry(colStatsList, qualifiedName,
modifTime, statsTime, redefTime, heap_);
ULng32 cacheEntrySize = heap_->getAllocSize() - histCacheHeapSize;
if(CmpCommon::getDefault(CACHE_HISTOGRAMS_CHECK_FOR_LEAKS) == DF_ON)
{
delete histogramsForCache;
ULng32 histCacheHeapSize2 = heap_->getAllocSize();
CMPASSERT( histCacheHeapSize == histCacheHeapSize2);
histogramsForCache = new (heap_)
HistogramsCacheEntry(colStatsList, qualifiedName,
modifTime, statsTime, redefTime, heap_);
cacheEntrySize = heap_->getAllocSize() - histCacheHeapSize2;
}
histogramsForCache->setSize(cacheEntrySize);
if(FALSE)
{
delete histogramsForCache;
histogramsForCache = new (heap_)
HistogramsCacheEntry(colStatsList, qualifiedName,
modifTime, statsTime, redefTime, heap_);
}
// add it to the cache
QualifiedName* key = const_cast<QualifiedName*>
(histogramsForCache->getName());
QualifiedName *name = histogramsCache_->insert(key, histogramsForCache);
if (name)
{
// append it to least recently used queue
lruQ_.insertAt(lruQ_.entries(), histogramsForCache);
}
size_ += cacheEntrySize;
}
// if we're above memoryLimit_, try to decache
NABoolean HistogramCache::enforceMemorySpaceConstraints()
{
if (size_ <= memoryLimit_)
return TRUE;
HistogramsCacheEntry* entry = NULL;
while (lruQ_.entries())
{
entry = lruQ_[0];
if (entry->accessedInCurrentStatement())
return FALSE;
deCache(&entry);
if (size_ <= memoryLimit_)
return TRUE;
}
return FALSE;
}
// lookup given table's histograms.
// if found, return its HistogramsCacheEntry*.
// otherwise, return NULL.
HistogramsCacheEntry* HistogramCache::lookUp(NATable& table)
{
const QualifiedName& tblNam = table.getFullyQualifiedGuardianName();
HistogramsCacheEntry* hcEntry = NULL;
if (histogramsCache_)
{
// lookup given table's lean histogram cache entry
hcEntry = histogramsCache_->getFirstValue(&tblNam);
if (hcEntry)
{
// move entry to tail of least recently used queue
lruQ_.remove(hcEntry);
lruQ_.insertAt(lruQ_.entries(), hcEntry);
}
}
return hcEntry;
}
// decache entry
void HistogramCache::deCache(HistogramsCacheEntry** entry)
{
if (entry && (*entry))
{
ULng32 entrySize = (*entry)->getSize();
histogramsCache_->remove(const_cast<QualifiedName*>((*entry)->getName()));
lruQ_.remove(*entry);
ULng32 heapSizeBeforeDelete = heap_->getAllocSize();
delete (*entry);
ULng32 memReclaimed = heapSizeBeforeDelete - heap_->getAllocSize();
if(CmpCommon::getDefault(CACHE_HISTOGRAMS_CHECK_FOR_LEAKS) == DF_ON)
CMPASSERT( memReclaimed >= entrySize );
*entry = NULL;
size_ -= entrySize;
}
}
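// set a new memory limit for the cache and evict least-recently-used
// entries (via enforceMemorySpaceConstraints) until the cache fits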
void HistogramCache::resizeCache(size_t limit)
{
memoryLimit_ = limit;
enforceMemorySpaceConstraints();
}
ULng32 HistogramCache::entries() const
{
return histogramsCache_ ? histogramsCache_->entries() : 0;
}
void HistogramCache::display() const
{
HistogramCache::print();
}
void
HistogramCache::print(FILE *ofd, const char* indent, const char* title) const
{
#ifndef NDEBUG
BUMP_INDENT(indent);
fprintf(ofd,"%s%s\n",NEW_INDENT,title);
fprintf(ofd,"entries: %d \n", entries());
fprintf(ofd,"size: %d bytes\n", size_);
for (CollIndex x=lruQ_.entries(); x>0; x--)
{
lruQ_[x-1]->print(ofd, indent, "HistogramCacheEntry");
}
#endif
}
void HistogramCache::traceTable(NATable& table) const
{
if (tfd_)
{
NAString tableName(table.getTableName().getQualifiedNameAsString());
fprintf(tfd_,"table:%s\n",tableName.data());
table.getColStats()->trace(tfd_, &table);
fflush(tfd_);
}
}
void HistogramCache::traceTablesFinalize() const
{
if (tfd_)
{
fprintf(tfd_,"cache_size:%d\n", size_);
fprintf(tfd_,"cache_heap_size:" PFSZ "\n", heap_->getAllocSize());
fflush(tfd_);
}
}
void HistogramCache::closeTraceFile()
{
if (tfd_) fclose(tfd_);
tfd_ = NULL;
}
void HistogramCache::openTraceFile(const char *filename)
{
tfd_ = fopen(filename, "w+");
}
void HistogramCache::closeMonitorFile()
{
if (mfd_) fclose(mfd_);
mfd_ = NULL;
}
void HistogramCache::openMonitorFile(const char *filename)
{
mfd_ = fopen(filename, "w+");
}
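// write one monitoring record per cached entry to the monitor file,
// plus optional cache-level memory detail
// (CQD CACHE_HISTOGRAMS_MONITOR_MEM_DETAIL), then flush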
void HistogramCache::monitor() const
{
// if histogram caching is off, there's nothing to monitor
if(!OptDefaults::cacheHistograms()) return;
if (mfd_)
{
for (CollIndex x=lruQ_.entries(); x>0; x--)
{
lruQ_[x-1]->monitor(mfd_);
}
if (CmpCommon::getDefault(CACHE_HISTOGRAMS_MONITOR_MEM_DETAIL) == DF_ON)
{
fprintf(mfd_,"cache_size:%d\n", size_);
fprintf(mfd_,"cache_heap_size:" PFSZ "\n", heap_->getAllocSize());
}
fflush(mfd_);
}
}
// constructor for memory efficient representation of colStats.
// colStats has both single-column & multi-column histograms.
HistogramsCacheEntry::HistogramsCacheEntry
(const StatsList & colStats,
const QualifiedName & qualifiedName,
const Int64 & modifTime,
const Int64 & statsTime,
const Int64 & redefTime,
NAMemory * heap)
: full_(NULL), multiColumn_(NULL), name_(NULL), heap_(heap)
, refreshTime_(0), singleColumnPositions_(heap)
, accessedInCurrentStatement_(TRUE)
, size_(0)
{
modifTime_ = modifTime;
statsTime_ = statsTime;
updateRefreshTime();
redefTime_ = redefTime;
preFetched_ = CURRSTMT_OPTDEFAULTS->preFetchHistograms();
allFakeStats_ = colStats.allFakeStats();
// make a deep copy of the key.
// qualifiedName is short-lived (from stmtheap).
// name_ is longer-lived (from contextheap).
name_ = new(heap_) QualifiedName(qualifiedName, heap_);
// create pointers to full single-column histograms (include fake)
UInt32 singleColumnCount = colStats.getSingleColumnCount();
if (singleColumnCount > 0)
{
full_ = new(heap_) NAList<ColStatsSharedPtr>(heap_, singleColumnCount);
// fill-in pointers to deep copy of single-column histograms
for(UInt32 i=0; i<colStats.entries();i++)
{
const NAColumnArray& colArray = colStats[i]->getStatColumns();
if (colArray.entries() == 1)
{
// keep pointer to deep copy of single-column histogram
full_->insertAt(full_->entries(),
ColStats::deepCopyHistIntoCache(*(colStats[i]),heap_));
// update singleColumnPositions
singleColumnPositions_ +=
(Lng32)colArray.getColumn(Lng32(0))->getPosition();
}
}
}
// create pointers to multi-column histograms
multiColumn_ = new(heap_) MultiColumnHistogramList(heap_);
// add deep copy of multi-column histograms (but, avoid duplicates)
multiColumn_->addMultiColumnHistograms(colStats);
}
// addToCachedEntry adds histograms of the same type
// (single-column and/or multi-column) to this cache entry
void
HistogramsCacheEntry::addToCachedEntry
(NAColumnArray & columns, StatsList & list)
{
// update allFakeStats_
if (allFakeStats_)
allFakeStats_ = list.allFakeStats();
//iterate over all the colstats in the stats list passed in
ColumnSet singleColHistAdded(heap_);
for(UInt32 j=0;j<list.entries();j++)
{
//get the columns for the current colstats
NAColumnArray colList = list[j]->getStatColumns();
//get the first column for the columns represented by
//the current colstats
NAColumn * column = colList.getColumn(Lng32(0));
//column position of first column
Lng32 currentColPosition = column->getPosition();
//check if current column requires full histograms
NABoolean requiresHistogram = column->needHistogram();
//check if current colstats is a single-column histogram
NABoolean singleColHist = (colList.entries()==1? TRUE: FALSE);
NABoolean mcForHbasePart = list[j]->isMCforHbasePartitioning ();
//only fullHistograms are inserted in full_.
//We also add fake histograms to the cache.
//This will help us not to call FetchHistograms
//for a column that has fake statistics.
//Previously we did not cache statistics for
//columns that did not have statistics in the histograms tables
//(FetchHistogram faked statistics for such column).
//Since statistics for such columns were not found in the
//cache we had to repeatedly call FetchHistogram
//to get statistics for these columns
//instead of just getting the fake statistics from the cache.
//FetchHistograms always return fake statistics for such columns
//so why not just cache them and not call FetchHistograms.
//When statistics are added for these columns then the timestamp
//matching code will realize that and
//re-read the statistics for the table again.
if((requiresHistogram || NOT singleColHist)|| list[j]->isFakeHistogram())
{
//if single column Histograms
//if((singleColHist || mcForHbasePart) && (!singleColumnPositions_.contains(currentColPosition)))
if((singleColHist) && (!singleColumnPositions_.contains(currentColPosition)))
{
//Current colstats represent a single column histogram
//Insert the colstats from the stats list passed in, at the end of
//this object's stats list (represented by full_).
full_->insertAt(full_->entries(),
ColStats::deepCopyHistIntoCache(*(list[j]),heap_));
singleColHistAdded += currentColPosition;
}
else if (NOT singleColHist)
{
//Assumption: a multi-column histogram is retrieved when
//histograms for any of its columns are retrieved.
//e.g. Table T1(a int, b int, c int)
//histograms: {a},{b},{c},{a,b},{a,c},{b,c},{a,b,c}
//If histograms for column a are fetched we will get
//histograms: {a}, {a,b}, {a,c}, {a,b,c}
//If histograms for column b are fetched we will get
//histograms: {b}, {a,b}, {b,c}, {a,b,c}
//Therefore to avoid duplicated multicolumn stats being inserted
//we pass down the list of single columns for which we have stats
//Current colstats represent a multicolumn histogram
addMultiColumnHistogram(*(list[j]), &singleColumnPositions_);
}
}
}
singleColumnPositions_ += singleColHistAdded;
}
// add multi-column histogram to this cache entry
void
HistogramsCacheEntry::addMultiColumnHistogram
(const ColStats& mcStat, ColumnSet* singleColPositions)
{
if (!multiColumn_)
multiColumn_ = new(heap_) MultiColumnHistogramList(heap_);
multiColumn_->addMultiColumnHistogram(mcStat, singleColPositions);
}
const QualifiedName*
HistogramsCacheEntry::getName() const
{
return name_;
}
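// return the x-th cached full single-column histogram, or NULL if
// there are no single-column histograms or x is out of range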
const ColStatsSharedPtr
HistogramsCacheEntry::getStatsAt(CollIndex x) const
{
if (!full_ OR x >= full_->entries())
return NULL;
else
return full_->at(x);
}
const MultiColumnHistogram*
HistogramsCacheEntry::getMultiColumnAt(CollIndex x) const
{
if (!multiColumn_ OR x >= multiColumn_->entries())
return NULL;
else
return multiColumn_->at(x);
}
// return pointer to full single-column histogram identified by col
ColStatsSharedPtr const
HistogramsCacheEntry::getHistForCol (NAColumn& col) const
{
if (!full_) return NULL;
// search for colPos in full_
for(UInt32 i=0; i < full_->entries(); i++)
{
// have we found colPos?
if (((*full_)[i]->getStatColumnPositions().entries() == 1) AND
(*full_)[i]->getStatColumnPositions().contains(col.getPosition()))
{
return (*full_)[i];
}
}
return NULL;
}
// insert all multicolumns referencing col into list
// use singleColsFound to avoid duplicates
void
HistogramsCacheEntry::getMCStatsForColFromCacheIntoList
(StatsList& list, // out: "fat" rep of multi-column stats for col
NAColumn& col, // in: column whose multi-column stats we want
ColumnSet& singleColsFound) // in: columns whose single-column
//stats have already been processed by caller.
//Assumption: a multi-column histogram is retrieved when
//histograms for any of its columns are retrieved.
{
CollIndex multiColCount = multiColumnCount();
if (multiColCount <= 0) return; // entry has no multicolumn stats
// search entry's multicolumn stats for col
NAMemory* heap = list.heap();
for(UInt32 i=0; i<multiColCount; i++)
{
const MultiColumnHistogram* mcHist = getMultiColumnAt(i);
if (mcHist)
{
ColumnSet mcCols(mcHist->cols(), STMTHEAP);
if (!mcCols.contains(col.getPosition()))
continue; // no col
if ((mcCols.intersectSet(singleColsFound)).entries())
continue; // avoid dup
// create "fat" representation of multi-column histogram
ColStatsSharedPtr mcStat;
if (col.getNATable()->isHbaseTable() && col.isPrimaryKey()) {
// For mcStats covering a key column of a HBASE table,
// create a colStat object with multi-intervals, which will
// be useful in allowing better stats-based split.
mcStat = new (STMTHEAP) ColStats(*(mcHist->getColStatsPtr()),
STMTHEAP, TRUE);
} else {
ComUID id(mcHist->id());
CostScalar uec(mcHist->uec());
CostScalar rows(mcHist->rows());
mcStat = new (STMTHEAP) ColStats
(id, uec, rows, rows, FALSE, FALSE, NULL, FALSE,
1.0, 1.0, 0, STMTHEAP, FALSE);
// populate its NAColumnArray with mcCols
(*mcStat).populateColumnArray(mcHist->cols(), col.getNATable());
// set up its histogram interval
HistogramSharedPtr histogram = new(STMTHEAP) Histogram(heap);
HistInt loInt;
NABoolean boundaryInclusive = TRUE;
HistInt hiInt(1, NULL, (*mcStat).statColumns(),
rows, uec, boundaryInclusive, 0);
histogram->insert(loInt);
histogram->insert(hiInt);
mcStat->setHistogram(histogram);
MCSkewedValueList * mcSkewedValueList = new (STMTHEAP) MCSkewedValueList (*(mcHist->getMCSkewedValueList()), STMTHEAP);
mcStat->setMCSkewedValueList(*mcSkewedValueList);
}
// append to list the mcStat
list.insertAt(list.entries(), mcStat);
}
}
}
//destructor
HistogramsCacheEntry::~HistogramsCacheEntry()
{
if(full_)
{
ColStatsSharedPtr colStat = NULL;
while(full_->getFirst(colStat))
{
colStat->deepDeleteFromHistogramCache();
//colStats is a shared pointer
//and will not be deleted till
//ref count goes to zero
//Therefore to avoid leaks and
//ensure colStats is deleted we
//do the following
ColStats * colStatPtr = colStat.get();
colStat.reset();
delete colStatPtr;
}
delete full_;
}
if(multiColumn_)
delete multiColumn_;
if(name_)
delete name_;
singleColumnPositions_.clear();
}
void HistogramsCacheEntry::display() const
{
HistogramsCacheEntry::print();
}
void HistogramsCacheEntry::monitor(FILE* mfd) const
{
NAString tableName(name_->getQualifiedNameAsString());
fprintf(mfd,"table:%s\n",tableName.data());
if (CmpCommon::getDefault(CACHE_HISTOGRAMS_MONITOR_HIST_DETAIL) == DF_ON)
{
if (full_)
{
for (CollIndex x=0; x<full_->entries(); x++)
{
full_->at(x)->trace(mfd, NULL);
}
}
if (multiColumn_)
{
multiColumn_->print(mfd, NULL);
}
}
if (CmpCommon::getDefault(CACHE_HISTOGRAMS_MONITOR_MEM_DETAIL) == DF_ON)
fprintf(mfd,"table_size:%d\n",size_);
fflush(mfd);
}
void HistogramsCacheEntry::print
(FILE *ofd, const char* indent, const char* title) const
{
#ifndef NDEBUG
BUMP_INDENT(indent);
fprintf(ofd,"%s%s\n",NEW_INDENT,title);
name_->print(ofd);
fprintf(ofd,"accessedInCurrentStatement_:%d ", accessedInCurrentStatement_);
fprintf(ofd,"allFakeStats_:%d ", allFakeStats_);
fprintf(ofd,"preFetched_:%d \n", preFetched_);
char time[30];
convertInt64ToAscii(modifTime_, time);
fprintf(ofd,"modifTime_:%s ", time);
convertInt64ToAscii(redefTime_, time);
fprintf(ofd,"redefTime_:%s ", time);
convertInt64ToAscii(refreshTime_, time);
fprintf(ofd,"refreshTime_:%s ", time);
convertInt64ToAscii(statsTime_, time);
fprintf(ofd,"statsTime_:%s ", time);
convertInt64ToAscii(getLastUpdateStatsTime(), time);
fprintf(ofd,"lastUpdateStatsTime:%s \n", time);
fprintf(ofd,"single-column histograms:%d ", singleColumnCount());
singleColumnPositions_.printColsFromTable(ofd,NULL);
if (full_)
{
for (CollIndex x=0; x<full_->entries(); x++)
{
full_->at(x)->print(ofd);
}
}
fprintf(ofd,"multi-column histograms:%d ", multiColumnCount());
if (multiColumn_)
{
multiColumn_->print(ofd);
}
#endif
}
// -----------------------------------------------------------------------
// getRangePartitionBoundaryValues()
// This method receives a string within which the partitioning key values
// appear in a comma-separated sequence. It returns an ItemExprList that
// contains ConstValue expressions for representing each partitioning
// key value as shown below:
//
// ------ ------ ------
// "<value1>, <value2>, <value3>" => | | ---> | | ---> | |
// ------ ------ ------
// | | |
// v v v
// ConstValue ConstValue ConstValue
// (<value1>) (<value2>) (<value3>)
//
// -----------------------------------------------------------------------
ItemExpr * getRangePartitionBoundaryValues
(const char * keyValueBuffer,
const Lng32 keyValueBufferSize,
NAMemory* heap,
CharInfo::CharSet strCharSet = CharInfo::UTF8
)
{
char * keyValue; // the string for the key value
ItemExpr * partKeyValue; // -> dynamically allocated expression
Lng32 length; // index to the next key value and its length
Lng32 startIndex = 0;
Lng32 stopIndex = keyValueBufferSize-1;
startIndex = skipLeadingBlanks(keyValueBuffer, startIndex, stopIndex);
// Skip leading '('
NABoolean leadingParen = FALSE;
if (keyValueBuffer[startIndex] == '(')
{
leadingParen = TRUE;
startIndex++;
}
stopIndex = skipTrailingBlanks(&keyValueBuffer[startIndex], stopIndex);
// Skip trailing ')' only if there was a leading paren. This
// is the case where the value comes in as (<value>)
if ((keyValueBuffer[stopIndex] == ')') &&
(leadingParen == TRUE))
stopIndex--;
length = stopIndex - startIndex + 1;
NAString keyValueString( &keyValueBuffer[startIndex], (size_t) length );
// ---------------------------------------------------------------------
// Copy the string from the keyValueBuffer into a string that
// is terminated by a semicolon and a null.
// ---------------------------------------------------------------------
keyValue = new (heap) char[length + 1 /* for semicolon */ + 1 /* for eol */ ];
// strncpy( keyValue, keyValueString.data(), (size_t) length );
//soln:10-031112-1256
// strncpy replaced with memcpy to handle the case where a column of the partition's first
// key value is a NULL character within double quotes, e.g. ( '\0' ), i.e. ( "( "6666673" , "\0" , 8060928 )").
memcpy(keyValue, (char *)( keyValueString.data() ), (size_t) length );
keyValue[length] = ';';
keyValue[length+1] = '\0';
// ---------------------------------------------------------------------
// Create a new ItemExprList using the parse tree generated from the
// string of comma-separated literals.
// ---------------------------------------------------------------------
Parser parser(CmpCommon::context());
//partKeyValue = parser.getItemExprTree(keyValue);
partKeyValue = parser.getItemExprTree(keyValue,length+1,strCharSet);
// Check to see if the key values parsed successfully. An error
// could occur if the table is an MP Table and the first key values
// contain MP syntax that is not supported by MX. For instance
// Datetime literals which do not have the max number of digits in
// each field. (e.g. DATETIME '1999-2-4' YEAR TO DAY)
//
if(partKeyValue == NULL) {
return NULL;
}
return partKeyValue->copyTree(heap);
} // static getRangePartitionBoundaryValues()
// In some cases we don't have a text representation of the start keys,
// only the encoded keys (e.g. from HBase regions start keys). In this
// case, un-encode these binary values and form ConstValues from them.
static ItemExpr * getRangePartitionBoundaryValuesFromEncodedKeys(
const NAColumnArray & partColArray,
const char * encodedKey,
const Lng32 encodedKeyLen,
NAMemory* heap)
{
Lng32 keyColOffset = 0;
ItemExpr *result = NULL;
char *actEncodedKey = (char *) encodedKey; // original key or a copy
const char* encodedKeyP = NULL;
char* varCharstr = NULL;
Lng32 totalKeyLength = 0;
Lng32 numProvidedCols = 0;
Lng32 lenOfFullyProvidedCols = 0;
// in newer HBase versions, the region start key may be shorter than an actual key
for (CollIndex i = 0; i < partColArray.entries(); i++)
{
const NAType *pkType = partColArray[i]->getType();
Lng32 colEncodedLength = pkType->getSQLnullHdrSize() + pkType->getNominalSize();
totalKeyLength += colEncodedLength;
if (totalKeyLength <= encodedKeyLen)
{
// this column is fully provided in the region start key
numProvidedCols++;
lenOfFullyProvidedCols = totalKeyLength;
}
}
if (encodedKeyLen < totalKeyLength)
{
// the provided key does not cover all the key columns
// need to extend the partial buffer, allocate a copy
actEncodedKey = new(heap) char[totalKeyLength];
memcpy(actEncodedKey, encodedKey, encodedKeyLen);
// extend the remainder with zeroes, assuming that this is what
// HBase does when deciding which region a row belongs to
memset(&actEncodedKey[encodedKeyLen], 0, totalKeyLength-encodedKeyLen);
Lng32 currOffset = lenOfFullyProvidedCols;
// go through the partially or completely missing columns and make something up
// so that we can treat the buffer as fully encoded in the final loop below
for (CollIndex j = numProvidedCols; j < partColArray.entries(); j++)
{
const NAType *pkType = partColArray[j]->getType();
Lng32 nullHdrSize = pkType->getSQLnullHdrSize();
int valOffset = currOffset + nullHdrSize;
int valEncodedLength = pkType->getNominalSize();
Lng32 colEncodedLength = nullHdrSize + valEncodedLength;
NABoolean isDescending = (partColArray[j]->getClusteringKeyOrdering() == DESCENDING);
NABoolean nullHdrAlreadySet = FALSE;
NABoolean columnIsPartiallyProvided = (currOffset < encodedKeyLen);
if (columnIsPartiallyProvided)
{
// This column is partially provided, try to make sure that it has a valid
// value. Note that the buffer has a prefix of some bytes with actual key
// values, followed by bytes that are zeroed out.
// the number of bytes actually provided in the key (not filled in)
int numBytesInProvidedVal = encodedKeyLen-valOffset;
if (nullHdrSize && numBytesInProvidedVal <= 0)
{
// only the null-header or a part thereof was provided
CMPASSERT(nullHdrSize == sizeof(short));
// get the partial indicator values into a short
short indicatorVal = *reinterpret_cast<short *>(&actEncodedKey[currOffset]);
// make it either 0 or -1
if (indicatorVal)
indicatorVal = -1;
// put it back and let the code below know that we set it already
// (this is handled otherwise as a non-provided column)
memcpy(&actEncodedKey[currOffset], &indicatorVal, sizeof(indicatorVal));
nullHdrAlreadySet = TRUE;
columnIsPartiallyProvided = FALSE;
}
// Next, decide by data type whether it's ok for the type to have
// a suffix of the buffer zeroed out (descending columns will
// see 0xFF values, once the encoded value gets inverted). If the
// type can't take it or we are not quite sure, we'll just discard
// all the partial information. Note that this could potentially
// lead to two partition boundaries with the same key, and also
// to partition boundaries that don't reflect the actual region
// boundaries.
if (columnIsPartiallyProvided)
switch (pkType->getTypeQualifier())
{
case NA_NUMERIC_TYPE:
{
NumericType *nt = (NumericType *) pkType;
if (!nt->isExact() || nt->isDecimal() || nt->isBigNum() ||
(isDescending && nt->decimalPrecision()))
// we may be able to improve this in the future
columnIsPartiallyProvided = FALSE;
}
break;
case NA_DATETIME_TYPE:
case NA_INTERVAL_TYPE:
// those types should tolerate zeroing out trailing bytes, but
// not filling with 0xFF
if (isDescending)
columnIsPartiallyProvided = FALSE;
break;
case NA_CHARACTER_TYPE:
// generally, character types should also tolerate zeroing out
// trailing bytes, but we might need to clean up characters
// that got split in the middle
{
CharInfo::CharSet cs = pkType->getCharSet();
switch (cs)
{
case CharInfo::UCS2:
// For now just accept partial characters, it's probably ok
// since they are just used as a key. May look funny in EXPLAIN.
break;
case CharInfo::UTF8:
{
// temporarily invert the provided key so it is actual UTF8
if (isDescending)
for (int i=0; i<numBytesInProvidedVal; i++)
actEncodedKey[valOffset+i] = ~actEncodedKey[valOffset+i];
CMPASSERT(numBytesInProvidedVal > 0);
// remove a trailing partial character, if needed
int validLen = lightValidateUTF8Str(&actEncodedKey[valOffset],
numBytesInProvidedVal);
// replace the remainder of the buffer with UTF8 min/max chars
fillWithMinMaxUTF8Chars(&actEncodedKey[valOffset+validLen],
valEncodedLength - validLen,
0,
isDescending);
// limit to the max # of UTF-8 characters, if needed
if (pkType->getPrecisionOrMaxNumChars() > 0)
{
// this time validate the # of chars (likely to be more,
// since we filled to the end with non-blanks)
validLen = lightValidateUTF8Str(&actEncodedKey[valOffset],
valEncodedLength,
pkType->getPrecisionOrMaxNumChars());
if (validLen > 0)
// space after valid #chars is filled with blanks
memset(&actEncodedKey[valOffset+validLen], ' ', valEncodedLength-validLen);
else
columnIsPartiallyProvided = FALSE;
}
// undo the inversion, if needed, now for the whole key
if (isDescending)
for (int k=0; k<valEncodedLength; k++)
actEncodedKey[valOffset+k] = ~actEncodedKey[valOffset+k];
}
break;
case CharInfo::ISO88591:
// filling with 0x00 or 0xFF should both be ok
break;
default:
// don't accept partial keys for other charsets
columnIsPartiallyProvided = FALSE;
break;
}
}
break;
default:
// don't accept partial keys for any other data types
columnIsPartiallyProvided = FALSE;
break;
}
if (columnIsPartiallyProvided)
{
// a CQD can suppress, give errors, warnings or enable partially provided cols
DefaultToken tok = CmpCommon::getDefault(HBASE_RANGE_PARTITIONING_PARTIAL_COLS);
switch (tok)
{
case DF_OFF:
// disable use of partial columns
// (use this as a workaround if they cause problems)
columnIsPartiallyProvided = FALSE;
break;
case DF_MINIMUM:
// give an error (again, this is probably mostly used as a
// workaround or to detect past problems)
*CmpCommon::diags() << DgSqlCode(-1212) << DgInt0(j);
break;
case DF_MEDIUM:
// give a warning, could be used for searching or testing
*CmpCommon::diags() << DgSqlCode(+1212) << DgInt0(j);
break;
case DF_ON:
case DF_MAXIMUM:
default:
// allow it, no warning or error
break;
}
}
if (columnIsPartiallyProvided)
// from now on, treat it as if it were fully provided
numProvidedCols++;
}
if (!columnIsPartiallyProvided)
{
// This column is not at all provided in the region start key
// or we decided to erase the partial value.
// Generate the min/max value for ASC/DESC key columns.
// NOTE: This is generating un-encoded values, unlike
// the values we get from HBase. The next loop below
// will skip decoding for any values generated here.
Lng32 remainingBufLen = valEncodedLength;
if (nullHdrSize && !nullHdrAlreadySet)
{
// generate a NULL indicator
// NULL (-1) for descending columns, this is the max value
// non-NULL (0) for ascending columns, min value is non-null
short indicatorVal = (isDescending ? -1 : 0);
CMPASSERT(nullHdrSize == sizeof(short));
memcpy(&actEncodedKey[currOffset], &indicatorVal, sizeof(indicatorVal));
}
pkType->minMaxRepresentableValue(&actEncodedKey[valOffset],
&remainingBufLen,
isDescending,
NULL,
heap);
}
currOffset += colEncodedLength;
} // loop through columns not entirely provided
} // provided encoded key length < total key length
for (CollIndex c = 0; c < partColArray.entries(); c++)
{
const NAType *pkType = partColArray[c]->getType();
Lng32 decodedValueLen =
pkType->getNominalSize() + pkType->getSQLnullHdrSize();
ItemExpr *keyColVal = NULL;
// does this column need encoding (only if it actually came
// from an HBase split key, if we made up the value it's
// already in the decoded format)
if (pkType->isEncodingNeeded() && c < numProvidedCols)
{
encodedKeyP = &actEncodedKey[keyColOffset];
// for varchar the decoding logic expects the length to be in the first
// pkType->getVarLenHdrSize() chars, so add it.
// Please see bug LP 1444134 on how to improve this in the long term.
// Note that this is less than ideal:
// - A VARCHAR is really encoded as a fixed char in the key, as
// the full length without a length field
// - Given that an encoded key is not aligned, we should really
// consider it a byte string, e.g. a character type with charset
// ISO88591, which tolerates any bit patterns. Considering the
//   encoded key as the same data type as the column causes all kinds
// of problems.
// - The key decode function in the expressions code expects the varchar
// length field, even though it is not present in an actual key. So,
// we add it here in a separate buffer.
// - When we generate a ConstValue to represent the decoded key, we also
// need to include the length field, with length = max. length
if (pkType->getTypeName() == "VARCHAR")
{
Int32 varLenSize = pkType->getVarLenHdrSize() ;
Int32 nullHdrSize = pkType->getSQLnullHdrSize();
// Format of encodedKeyP :| null hdr | varchar data|
// Format of VarcharStr : | null hdr | var len hdr | varchar data|
varCharstr = new (heap) char[decodedValueLen + varLenSize];
if (nullHdrSize > 0)
str_cpy_all(varCharstr, encodedKeyP, nullHdrSize);
// careful, this works on little-endian systems only!!
str_cpy_all(varCharstr+nullHdrSize, (char*) &decodedValueLen,
varLenSize);
str_cpy_all(varCharstr+nullHdrSize+varLenSize,
encodedKeyP+nullHdrSize,
decodedValueLen-nullHdrSize);
decodedValueLen += pkType->getVarLenHdrSize();
encodedKeyP = varCharstr;
}
// un-encode the key value by using an expression
NAString encConstLiteral("encoded_val");
ConstValue *keyColEncVal =
new (heap) ConstValue(pkType,
(void *) encodedKeyP,
decodedValueLen,
&encConstLiteral,
heap);
CMPASSERT(keyColEncVal);
if (keyColEncVal->isNull())
{
// do not call the expression evaluator if the value
// to be decoded is NULL.
keyColVal = keyColEncVal ;
}
else
{
keyColVal =
new(heap) CompDecode(keyColEncVal,
pkType,
!partColArray.isAscending(c),
decodedValueLen,
CollationInfo::Sort,
TRUE,
heap);
keyColVal->synthTypeAndValueId();
keyColVal = keyColVal->evaluate(heap);
if ( !keyColVal )
return NULL;
}
} // encoded
else
{
// simply use the provided value as the binary value of a constant
keyColVal =
new (heap) ConstValue(pkType,
(void *) &actEncodedKey[keyColOffset],
decodedValueLen,
NULL,
heap);
}
// this and the above assumes that encoded and unencoded values
// have the same length
keyColOffset += decodedValueLen;
if (pkType->getTypeName() == "VARCHAR")
{
keyColOffset -= pkType->getVarLenHdrSize();
NADELETEBASIC (varCharstr, heap);
varCharstr = NULL;
}
if (result)
result = new(heap) ItemList(result, keyColVal);
else
result = keyColVal;
}
// make sure we consumed the entire key but no more than that
CMPASSERT(keyColOffset == totalKeyLength);
if (actEncodedKey != encodedKey)
NADELETEBASIC(actEncodedKey, heap);
return result;
} // static getRangePartitionBoundaryValuesFromEncodedKeys()
// -----------------------------------------------------------------------
// checkColumnTypeForSupportability()
// This method checks that every partitioning key column in partColArray
// has a data type that is supported for processing partition key values.
// If an unsupported type is found, it issues a diagnostic and returns
// FALSE.
// -----------------------------------------------------------------------
NABoolean checkColumnTypeForSupportability(const NAColumnArray & partColArray, const char* key)
{
NABoolean floatWarningIssued = FALSE;
for (CollIndex c = 0; c < partColArray.entries(); c++) {
const NAType *pkType = partColArray[c]->getType();
// For the EAP release, the unsupported types are the non-standard
// SQL/MP Datetime types. For the FCS release the unsupported
// types are the FRACTION only SQL/MP Datetime types.
//
// They are (for now) represented as CHAR types that have a
// non-zero MP Datetime size.
//
NABoolean unsupportedPartnKey = FALSE;
NABoolean unsupportedFloatDatatype = FALSE;
if (NOT pkType->isSupportedType())
unsupportedPartnKey = TRUE;
else if (DFS2REC::isFloat(pkType->getFSDatatype())) {
const NATable * naTable = partColArray[c]->getNATable();
if ((CmpCommon::getDefault(MARIAQUEST_PROCESS) == DF_OFF) &&
(NOT naTable->isSeabaseTable()) &&
(NOT naTable->isHiveTable())) {
unsupportedPartnKey = TRUE;
unsupportedFloatDatatype = TRUE;
}
}
if (unsupportedPartnKey) {
// Get the name of the table which has the unsupported
// partitioning key column.
//
const NAString &tableName =
partColArray[c]->getNATable()->
getTableName().getQualifiedNameAsAnsiString();
if (unsupportedFloatDatatype)
*CmpCommon::diags()
<< DgSqlCode(-1120);
else
// ERROR 1123 Unable to process the partition key values...
*CmpCommon::diags()
<< DgSqlCode(-1123)
<< DgString0(key)
<< DgTableName(tableName);
return FALSE;
}
}
return TRUE;
}
// -----------------------------------------------------------------------
// createRangePartitionBoundaries()
// This method is used for creating a tuple, which defines the maximum
// permissible values that the partitioning key columns can contain
// within a certain partition, for range-partitioned data.
// -----------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
static RangePartitionBoundaries * createRangePartitionBoundaries
(desc_struct * part_desc_list,
Lng32 numberOfPartitions,
const NAColumnArray & partColArray,
NAMemory* heap)
{
// ---------------------------------------------------------------------
// ASSUMPTION: The partitions descriptor list is a singly-linked list
// ========== in which the first element is the descriptor for the
// first partition and the last element is the descriptor
// for the last partition, in partitioning key sequence.
// ---------------------------------------------------------------------
desc_struct * partns_desc = part_desc_list;
CMPASSERT(partns_desc->body.partns_desc.primarypartition);
// Check all the partitioning keys. If any of them are not
// supported, issue an error and return.
//
// Skip past the primary partition, so that a meaningful first
// key value can be used for the error message.
char* key = (partns_desc->header.next) ->body.partns_desc.firstkey;
if ( !checkColumnTypeForSupportability(partColArray, key) )
return NULL;
// ---------------------------------------------------------------------
// Allocate a new RangePartitionBoundaries.
// ---------------------------------------------------------------------
RangePartitionBoundaries * partBounds = new (heap)
RangePartitionBoundaries
(numberOfPartitions,
partColArray.entries(),heap);
// ---------------------------------------------------------------------
// compute the length of the encoded partitioning key
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// Iterate over all the partitions and define the boundary (maximum
// permissible key values) for each one of them.
// The first key for the first partition cannot be specified in
// the CREATE TABLE command. It is therefore stored as an empty
// string in the SMD.
// NOTE: The RangePartitionBoundaries is 0 based.
// ---------------------------------------------------------------------
partns_desc = partns_desc->header.next; // skip the primary partition
Lng32 counter = 1;
char* encodedKey;
while (partns_desc AND (counter < numberOfPartitions))
{
encodedKey = partns_desc->body.partns_desc.encodedkey;
size_t encodedKeyLen = partns_desc->body.partns_desc.encodedkeylen;
if(heap != CmpCommon::statementHeap())
{
//we don't know here if encodedkey is a regular char or a wchar
//if it's a wchar then it should end with "\0\0", so add an extra
//'\0' to the end, it won't hurt anyway. Copying encodedKeyLen+1 chars
//will include one '\0' character and we add an extra '\0' to the end
//to make it "\0\0".
encodedKey = new(heap) char [encodedKeyLen+2];
encodedKey[encodedKeyLen] = encodedKey[encodedKeyLen+1] = '\0';
str_cpy_all(encodedKey, partns_desc->body.partns_desc.encodedkey,
encodedKeyLen);
}
ItemExpr *rangePartBoundValues = NULL;
if (partns_desc->body.partns_desc.firstkey)
// Extract and parse the partition boundary values, producing an
// ItemExprList of the boundary values.
//
rangePartBoundValues = getRangePartitionBoundaryValues(
partns_desc->body.partns_desc.firstkey,
partns_desc->body.partns_desc.firstkeylen,
heap);
else
rangePartBoundValues = getRangePartitionBoundaryValuesFromEncodedKeys(
partColArray,
encodedKey,
encodedKeyLen,
heap);
// Check to see if the key values parsed successfully. An error
// could occur if the table is an MP Table and the first key
// values contain MP syntax that is not supported by MX. For
// instance Datetime literals which do not have the max number
// of digits in each field. (e.g. DATETIME '1999-2-4' YEAR TO
// DAY)
//
if (rangePartBoundValues == NULL) {
// Get the name of the table which has the 'bad' first key
// value. Use the first entry in the array of partition
// columns (partColArray) to get to the NATable object.
//
const NAString &tableName =
partColArray[0]->getNATable()->
getTableName().getQualifiedNameAsAnsiString();
// The Parser will have already issued an error.
// ERROR 1123 Unable to process the partition key values...
*CmpCommon::diags()
<< DgSqlCode(-1123)
<< DgString0(partns_desc->body.partns_desc.firstkey)
<< DgTableName(tableName);
delete partBounds;
//coverity[leaked_storage]
return NULL;
}
partBounds->defineUnboundBoundary(
counter++,
rangePartBoundValues,
encodedKey);
partns_desc = partns_desc->header.next;
} // end while (partns_desc)
// ---------------------------------------------------------------------
// Before doing consistency check setup for the statement
// ---------------------------------------------------------------------
partBounds->setupForStatement(FALSE);
// ---------------------------------------------------------------------
// Perform a consistency check to ensure that a boundary was defined
// for each partition.
// ---------------------------------------------------------------------
partBounds->checkConsistency(numberOfPartitions);
return partBounds;
} // static createRangePartitionBoundaries()
#pragma warn(1506) // warning elimination
// -----------------------------------------------------------------------
// createRangePartitioningFunction()
// This method is used for creating a rangePartitioningFunction.
// -----------------------------------------------------------------------
static PartitioningFunction * createRangePartitioningFunction
(desc_struct * part_desc_list,
const NAColumnArray & partKeyColArray,
NodeMap* nodeMap,
NAMemory* heap)
{
// ---------------------------------------------------------------------
// Compute the number of partitions.
// ---------------------------------------------------------------------
desc_struct * partns_desc = part_desc_list;
Lng32 numberOfPartitions = 0;
while (partns_desc)
{
numberOfPartitions++;
partns_desc = partns_desc->header.next;
}
// ---------------------------------------------------------------------
// Each table has at least 1 partition
// ---------------------------------------------------------------------
numberOfPartitions = MAXOF(1,numberOfPartitions);
if (numberOfPartitions == 1)
return new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);
// ---------------------------------------------------------------------
// Create the partitioning key ranges
// ---------------------------------------------------------------------
RangePartitionBoundaries *boundaries =
createRangePartitionBoundaries(part_desc_list,
numberOfPartitions,
partKeyColArray,
heap);
// Check to see if the boundaries were created successfully. An
// error could occur if one of the partitioning keys is an
// unsupported type or if the table is an MP Table and the first key
// values contain MP syntax that is not supported by MX. For the
// EAP release, the unsupported types are the non-standard SQL/MP
// Datetime types. For the FCS release the unsupported types are
// the FRACTION only SQL/MP Datetime types. An example of a syntax
// error is a Datetime literal which does not have the max number of
// digits in each field. (e.g. DATETIME '1999-2-4' YEAR TO DAY)
//
if (boundaries == NULL) {
// The Parser may have already issued an error.
// ERROR 1123 Unable to process the partition key values...
// will have been issued by createRangePartitionBoundaries.
//
return NULL;
}
return new (heap) RangePartitioningFunction(boundaries, // memory leak??
nodeMap, heap);
} // static createRangePartitioningFunction()
// -----------------------------------------------------------------------
// createRoundRobinPartitioningFunction()
// This method is used for creating a RoundRobinPartitioningFunction.
// -----------------------------------------------------------------------
// LCOV_EXCL_START :cnu
static PartitioningFunction * createRoundRobinPartitioningFunction
(desc_struct * part_desc_list,
NodeMap* nodeMap,
NAMemory* heap)
{
// ---------------------------------------------------------------------
// Compute the number of partitions.
// ---------------------------------------------------------------------
desc_struct * partns_desc = part_desc_list;
Lng32 numberOfPartitions = 0;
while (partns_desc)
{
numberOfPartitions++;
partns_desc = partns_desc->header.next;
}
// ---------------------------------------------------------------------
// Each table has at least 1 partition
// ---------------------------------------------------------------------
numberOfPartitions = MAXOF(1,numberOfPartitions);
// For round robin partitioning, must create the partitioning function
// even for one partition, since the SYSKEY must be generated for
// round robin, and this is triggered off the partitioning function.
//
// if (numberOfPartitions == 1)
// return new (heap) SinglePartitionPartitioningFunction(nodeMap);
return new (heap) RoundRobinPartitioningFunction(numberOfPartitions, nodeMap, heap);
} // static createRoundRobinPartitioningFunction()
// LCOV_EXCL_STOP
// -----------------------------------------------------------------------
// createHashDistPartitioningFunction()
// This method is used for creating a HashDistPartitioningFunction.
// -----------------------------------------------------------------------
static PartitioningFunction * createHashDistPartitioningFunction
(desc_struct * part_desc_list,
const NAColumnArray & partKeyColArray,
NodeMap* nodeMap,
NAMemory* heap)
{
// ---------------------------------------------------------------------
// Compute the number of partitions.
// ---------------------------------------------------------------------
desc_struct * partns_desc = part_desc_list;
Lng32 numberOfPartitions = 0;
while (partns_desc)
{
numberOfPartitions++;
partns_desc = partns_desc->header.next;
}
// ---------------------------------------------------------------------
// Each table has at least 1 partition
// ---------------------------------------------------------------------
numberOfPartitions = MAXOF(1,numberOfPartitions);
if (numberOfPartitions == 1)
return new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);
return new (heap) HashDistPartitioningFunction(numberOfPartitions, nodeMap, heap);
} // static createHashDistPartitioningFunction()
// -----------------------------------------------------------------------
// createHash2PartitioningFunction()
// This method is used for creating a Hash2PartitioningFunction.
// -----------------------------------------------------------------------
static PartitioningFunction * createHash2PartitioningFunction
(desc_struct * part_desc_list,
const NAColumnArray & partKeyColArray,
NodeMap* nodeMap,
NAMemory* heap)
{
// ---------------------------------------------------------------------
// Compute the number of partitions.
// ---------------------------------------------------------------------
desc_struct * partns_desc = part_desc_list;
Lng32 numberOfPartitions = 0;
while (partns_desc)
{
numberOfPartitions++;
partns_desc = partns_desc->header.next;
}
// ---------------------------------------------------------------------
// Each table has at least 1 partition
// ---------------------------------------------------------------------
numberOfPartitions = MAXOF(1,numberOfPartitions);
if (numberOfPartitions == 1)
return new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);
return new (heap) Hash2PartitioningFunction(numberOfPartitions, nodeMap, heap);
} // static createHash2PartitioningFunction()
static PartitioningFunction * createHash2PartitioningFunction
(Int32 numberOfPartitions,
const NAColumnArray & partKeyColArray,
NodeMap* nodeMap,
NAMemory* heap)
{
// ---------------------------------------------------------------------
// Each table has at least 1 partition
// ---------------------------------------------------------------------
if (numberOfPartitions == 1)
return new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);
return new (heap) Hash2PartitioningFunction(numberOfPartitions, nodeMap, heap);
} // static createHash2PartitioningFunction()
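// -----------------------------------------------------------------------
// createNodeMapForHbase()
// Build a NodeMap for an HBase table from its region descriptors.
// The number of map entries is the number of salt buckets, if salted,
// otherwise the number of regions. When ESP co-location is enabled,
// the entries are populated with the region servers' node numbers.
// -----------------------------------------------------------------------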
static
NodeMap* createNodeMapForHbase(desc_struct* desc, const NATable* table,
int numSaltBuckets, NAMemory* heap)
{
Int32 partns = 0;
Int32 numRegions = 0;
desc_struct* hrk = desc;
while ( hrk ) {
numRegions++;
hrk=hrk->header.next;
}
if (numSaltBuckets <= 1)
partns = numRegions;
else
partns = numSaltBuckets;
NodeMap* nodeMap = new (heap)
NodeMap(heap, partns, NodeMapEntry::ACTIVE, NodeMap::HBASE);
// get nodeNames of region servers by making a JNI call
// do it only for multiple partition table
// TBD: co-location for tables where # of salt buckets and # regions don't match
if (partns > 1 && (CmpCommon::getDefault(TRAF_ALLOW_ESP_COLOCATION) == DF_ON) &&
(numSaltBuckets <= 1 || numSaltBuckets == numRegions)) {
ARRAY(const char *) nodeNames(heap, partns);
if (table->getRegionsNodeName(partns, nodeNames)) {
for (Int32 p=0; p < partns; p++) {
NAString node(nodeNames[p], heap);
// remove anything after node name
size_t size = node.index('.');
if (size && size != NA_NPOS)
node.remove(size);
// populate NodeMap with region server node ids
nodeMap->setNodeNumber(p, nodeMap->mapNodeNameToNodeNum(node));
}
}
}
return nodeMap;
}
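// -----------------------------------------------------------------------
// createHash2PartitioningFunctionForHBase()
// Create a hash2 partitioning function for an HBase table, based on the
// node map built from its region descriptors. Falls back to a
// single-partition function when only one partition results.
// -----------------------------------------------------------------------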
static
PartitioningFunction*
createHash2PartitioningFunctionForHBase(desc_struct* desc,
const NATable * table,
int numSaltBuckets,
NAMemory* heap)
{
desc_struct* hrk = desc;
NodeMap* nodeMap = createNodeMapForHbase(desc, table, numSaltBuckets, heap);
Int32 partns = nodeMap->getNumEntries();
PartitioningFunction* partFunc;
if ( partns > 1 )
partFunc = new (heap) Hash2PartitioningFunction(partns, nodeMap, heap);
else
partFunc = new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);
return partFunc;
}
// -----------------------------------------------------------------------
// createRangePartitionBoundaries()
// This method is used for creating a tuple, which defines the maximum
// permissible values that the partitioning key columns can contain
// within a certain partition, for range-partitioned data.
//
// The boundary values of the range partitions are completely defined by
// a histogram's boundary values.
//
// -----------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
RangePartitionBoundaries * createRangePartitionBoundariesFromStats
(const IndexDesc* idesc,
HistogramSharedPtr& hist,
Lng32 numberOfPartitions,
const NAColumnArray & partColArray,
const ValueIdList& partitioningKeyColumnsOrder,
const Int32 statsColsCount,
NAMemory* heap)
{
if ( (!checkColumnTypeForSupportability(partColArray, "")) ||
(numberOfPartitions != hist->numIntervals()) ||
(partColArray.entries() < statsColsCount)
)
return NULL;
// ---------------------------------------------------------------------
// Allocate a new RangePartitionBoundaries.
// ---------------------------------------------------------------------
RangePartitionBoundaries * partBounds = new (heap)
RangePartitionBoundaries
(numberOfPartitions,
partColArray.entries(),heap);
// ---------------------------------------------------------------------
// compute the length of the encoded partitioning key
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// Iterate over all the partitions and define the boundary (maximum
// permissible key values) for each one of them.
// The first key for the first partition cannot be specified in
// the CREATE TABLE command. It is therefore stored as an empty
// string in the SMD.
// NOTE: The RangePartitionBoundaries is 0 based.
// ---------------------------------------------------------------------
Lng32 counter = 1;
ULng32 totalEncodedKeyLength = 0;
Interval iter = hist->getFirstInterval();
while ( iter.isValid() ) {
totalEncodedKeyLength = 0;
NAString* evInStr = NULL;
NAColumn* ncol = partColArray[0];
const NAType* nt = ncol->getType();
double ev = ( !iter.isLast() ) ?
iter.hiBound().getDblValue() : nt->getMaxValue();
if ((partColArray.entries() == 1) && (statsColsCount == 1))
{
// Convert the double into a string value of the type of
// the leading key column
evInStr = nt->convertToString(ev, heap);
}
else if ((partColArray.entries() > 1) && (statsColsCount == 1))
{
MCboundaryValueList mcEv;
mcEv.insert(EncodedValue(ev));
evInStr = mcEv.convertToString(partColArray, iter.isLast());
}
else // partColArray.entries() > 1 && statsColsCount > 1
{
MCboundaryValueList mcEv = iter.hiMCBound();
evInStr = mcEv.convertToString(partColArray, iter.isLast());
}
if ( !evInStr )
return NULL;
// Construct a boundary as ItemExprList of ConstValues
ItemExpr* rangePartBoundValues = getRangePartitionBoundaryValues(
evInStr->data(), evInStr->length(), heap, CharInfo::ISO88591);
NAString totalEncodedKeyBuf;
ItemExpr* val = NULL;
ItemExpr* encodeExpr = NULL ;
ItemExprList* list = NULL;
list = new (heap) ItemExprList(rangePartBoundValues, heap,ITM_ITEM_LIST,FALSE);
for (CollIndex c = 0; c < partColArray.entries(); c++)
{
NAColumn* ncol = partColArray[c];
const NAType* nt = ncol->getType();
if (rangePartBoundValues->getOperatorType() == ITM_ITEM_LIST )
val = (ItemExpr*) (*list) [c];
else
val = (ItemExpr*) (*list) [0];
if (nt->isEncodingNeeded())
encodeExpr = new(heap) CompEncode(val, !(partColArray.isAscending(c)));
else
encodeExpr = val;
encodeExpr->synthTypeAndValueId();
const NAType& eeNT = encodeExpr->getValueId().getType();
ULng32 encodedKeyLength = eeNT.getEncodedKeyLength();
char* encodedKeyBuffer = new (heap) char[encodedKeyLength];
Lng32 offset;
Lng32 length;
ValueIdList vidList;
short ok = vidList.evaluateTree(encodeExpr,
encodedKeyBuffer,
encodedKeyLength,
&length,
&offset,
(CmpCommon::diags()));
totalEncodedKeyLength += encodedKeyLength;
totalEncodedKeyBuf += encodedKeyBuffer;
if ( ok != 0 )
return NULL;
}
char* char_totalEncodedKeyBuf = new char[totalEncodedKeyLength];
memcpy (char_totalEncodedKeyBuf, totalEncodedKeyBuf.data(), totalEncodedKeyLength);
if (totalEncodedKeyLength != 0)
{
partBounds->defineUnboundBoundary(
counter++,
rangePartBoundValues,
char_totalEncodedKeyBuf);
}
iter.next();
}
// ---------------------------------------------------------------------
// Before doing consistency check setup for the statement
// ---------------------------------------------------------------------
partBounds->setupForStatement(FALSE);
// ---------------------------------------------------------------------
// Perform a consistency check to ensure that a boundary was defined
// for each partition.
// ---------------------------------------------------------------------
partBounds->checkConsistency(numberOfPartitions);
// -----------------------------------------------------------------
// Add the first and the last boundary (0 and numberOfPartitions)
// at the ends that do not separate two partitions
// -----------------------------------------------------------------
partBounds->completePartitionBoundaries(
partitioningKeyColumnsOrder,
totalEncodedKeyLength);
return partBounds;
} // createRangePartitionBoundariesFromStats()
#pragma warn(1506) // warning elimination
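// -----------------------------------------------------------------------
// createRangePartitioningFunctionForSingleRegionHBase()
// For an HBase table with a single region, either return a single-
// partition function (the default) or, when CQD HBASE_USE_FAKED_REGIONS
// is set, fabricate that many evenly spaced key ranges over the key
// columns' min/max values and build a range partitioning function.
// -----------------------------------------------------------------------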
static
PartitioningFunction*
createRangePartitioningFunctionForSingleRegionHBase(
const NAColumnArray & partKeyColArray,
NAMemory* heap
)
{
NodeMap* nodeMap = NULL;
Lng32 regionsToFake =
(ActiveSchemaDB()->getDefaults()).getAsLong(HBASE_USE_FAKED_REGIONS);
if ( regionsToFake == 0 ) {
nodeMap = new (heap)
NodeMap(heap, 1, NodeMapEntry::ACTIVE, NodeMap::HBASE);
return new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);
}
nodeMap = new (heap)
NodeMap(heap, regionsToFake, NodeMapEntry::ACTIVE, NodeMap::HBASE);
//
// Setup an array of doubles to record the next begin key value for
// each key column. Needed when the table has a single region.
// The number of ranges is controlled by CQD HBASE_USE_FAKED_REGIONS.
//
// Later on, we can make smart split utilizing the stats.
//
Int32 keys = partKeyColArray.entries();
double* firstkeys = new (heap) double[keys];
double* steps = new (heap) double[keys];
for ( Int32 i=0; i<keys; i++ ) {
double min = partKeyColArray[i]->getType()->getMinValue();
double max = partKeyColArray[i]->getType()->getMaxValue();
firstkeys[i] = partKeyColArray[i]->getType()->getMinValue();
steps[i] = (max - min) / regionsToFake;
}
struct desc_struct* head = NULL;
struct desc_struct* tail = NULL;
Int32 i=0;
for ( i=0; i<regionsToFake; i++ ) {
if ( tail == NULL ) {
head = tail = new (heap) struct desc_struct;
// to satisfy createRangePartitionBoundaries() in NATable.cpp
tail->body.partns_desc.primarypartition = 1;
} else {
tail->header.next = new (heap) struct desc_struct;
tail = tail->header.next;
}
tail->header.next = NULL;
NAString firstkey('(');
for ( Int32 i=0; i<keys; i++ ) {
double v = firstkeys[i];
NAString* v_str = partKeyColArray[i]->getType()->convertToString(v,heap);
// If for some reason we can not make the conversion, we
// return a single-part func.
if ( !v_str ) {
nodeMap = new (heap)
NodeMap(heap, 1, NodeMapEntry::ACTIVE, NodeMap::HBASE);
return new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);
}
firstkey.append(*v_str);
if ( i < keys-1 )
firstkey.append(',');
// Prepare for the next range
firstkeys[i] += steps[i];
}
firstkey.append(')');
Int32 len = firstkey.length();
tail->body.partns_desc.firstkeylen = len;
tail->body.partns_desc.firstkey = new (heap) char[len];
memcpy(tail->body.partns_desc.firstkey, firstkey.data(), len);
// For now, assume firstkey == encodedkey
tail->body.partns_desc.encodedkeylen = len;
tail->body.partns_desc.encodedkey = new (heap) char[len];
memcpy(tail->body.partns_desc.encodedkey, firstkey.data(), len);
}
//
return createRangePartitioningFunction
(head,
partKeyColArray,
nodeMap,
heap);
}
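// fill in a partition descriptor's encoded key from the previous
// region's end key (the first partition gets an all-zero begin key)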
void
populatePartnDescOnEncodingKey( struct desc_struct* prevEndKey,
struct desc_struct* tail,
struct desc_struct* hrk,
NAMemory* heap)
{
if (!prevEndKey) {
// the start key of the first partition has all zeroes in it
Int32 len = hrk->body.hbase_region_desc.endKeyLen;
tail->body.partns_desc.encodedkeylen = len;
tail->body.partns_desc.encodedkey = new (heap) char[len];
memset(tail->body.partns_desc.encodedkey, 0, len);
}
else {
// the beginning key of this partition is the end key of
// the previous one
// (HBase returns end keys, we need begin keys here)
Int32 len = prevEndKey->body.hbase_region_desc.endKeyLen;
// For HBase regions, we don't have the text representation
// (value, value, ... value) of the boundary, just the encoded
// key.
tail->body.partns_desc.encodedkeylen = len;
tail->body.partns_desc.encodedkey = new (heap) char[len];
memcpy(tail->body.partns_desc.encodedkey,
prevEndKey->body.hbase_region_desc.endKey, len);
}
}
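// fill in a partition descriptor's first key (and encoded key) from the
// region's begin key, formatted as a quoted text boundary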
void
populatePartnDescOnFirstKey( struct desc_struct* ,
struct desc_struct* tail,
struct desc_struct* hrk,
NAMemory* heap)
{
char* buf = hrk->body.hbase_region_desc.beginKey;
Int32 len = hrk->body.hbase_region_desc.beginKeyLen;
NAString firstkey('(');
firstkey.append('\'');
firstkey.append(buf, len);
firstkey.append('\'');
firstkey.append(')');
Int32 keyLen = firstkey.length();
tail->body.partns_desc.firstkeylen = keyLen;
tail->body.partns_desc.firstkey = new (heap) char[keyLen];
memcpy(tail->body.partns_desc.firstkey, firstkey.data(), keyLen);
tail->body.partns_desc.encodedkeylen = keyLen;
tail->body.partns_desc.encodedkey = new (heap) char[keyLen];
memcpy(tail->body.partns_desc.encodedkey, firstkey.data(), keyLen);
}
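// signature of the helper used by convertRangeDescToPartnsDesc() to
// populate one partition descriptor from an HBase region descriptor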
typedef void (*populatePartnDescT)( struct desc_struct* prevEndKey,
struct desc_struct* tail,
struct desc_struct* hrk,
NAMemory* heap);
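// convert a list of HBase region descriptors into a list of partition
// descriptors, using funcPtr to populate each node; the first node is
// marked as the primary partition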
static struct desc_struct*
convertRangeDescToPartnsDesc(desc_struct* desc, populatePartnDescT funcPtr, NAMemory* heap)
{
desc_struct* hrk = desc;
desc_struct* prevEndKey = NULL;
struct desc_struct* head = NULL;
struct desc_struct* tail = NULL;
Int32 i=0;
while ( hrk ) {
struct desc_struct *newNode = new (heap) struct desc_struct;
memset(&newNode->header, 0, sizeof(newNode->header));
memset(&newNode->body.partns_desc, 0, sizeof(newNode->body.partns_desc));
newNode->header.nodetype = DESC_PARTNS_TYPE;
if ( tail == NULL ) {
head = tail = newNode;
// to satisfy createRangePartitionBoundaries() in NATable.cpp
tail->body.partns_desc.primarypartition = 1;
} else {
tail->header.next = newNode;
tail = tail->header.next;
}
(*funcPtr)(prevEndKey, tail, hrk, heap);
prevEndKey = hrk;
hrk = hrk->header.next;
}
return head;
}
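// -----------------------------------------------------------------------
// createRangePartitioningFunctionForMultiRegionHBase()
// Build a range partitioning function for an HBase table with multiple
// regions. HBase cell/row tables use the regions' begin keys as text
// boundaries; other tables use the encoded region keys.
// -----------------------------------------------------------------------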
static
PartitioningFunction*
createRangePartitioningFunctionForMultiRegionHBase(Int32 partns,
desc_struct* desc,
const NATable* table,
const NAColumnArray & partKeyColArray,
NAMemory* heap)
{
NodeMap* nodeMap = createNodeMapForHbase(desc, table, -1, heap);
struct desc_struct*
partns_desc = ( table->isHbaseCellTable() || table->isHbaseRowTable()) ?
convertRangeDescToPartnsDesc(desc, populatePartnDescOnFirstKey, heap)
:
convertRangeDescToPartnsDesc(desc, populatePartnDescOnEncodingKey, heap);
return createRangePartitioningFunction
(partns_desc,
partKeyColArray,
nodeMap,
heap);
}
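// count the entries in a desc_struct list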
Int32 findDescEntries(desc_struct* desc)
{
Int32 partns = 0;
desc_struct* hrk = desc;
while ( hrk ) {
partns++;
hrk = hrk->header.next;
}
return partns;
}
//
// A single entry point to figure out range partition function for
// Hbase.
//
static
PartitioningFunction*
createRangePartitioningFunctionForHBase(desc_struct* desc,
const NATable* table,
const NAColumnArray & partKeyColArray,
NAMemory* heap)
{
Int32 partns = 0;
if (CmpCommon::getDefault(HBASE_RANGE_PARTITIONING) != DF_OFF)
// First figure out # partns
partns = findDescEntries(desc);
else
partns = 1;
return (partns > 1) ?
createRangePartitioningFunctionForMultiRegionHBase(partns,
desc, table, partKeyColArray, heap)
:
createRangePartitioningFunctionForSingleRegionHBase(
partKeyColArray, heap);
}
static PartitioningFunction * createHivePartitioningFunction
(Int32 numberOfPartitions,
const NAColumnArray & partKeyColArray,
NodeMap* nodeMap,
NAMemory* heap)
{
// ---------------------------------------------------------------------
// Each table has at least 1 partition
// ---------------------------------------------------------------------
if (numberOfPartitions == 1)
return new (heap) SinglePartitionPartitioningFunction(nodeMap, heap);
return new (heap) HivePartitioningFunction(numberOfPartitions, nodeMap, heap);
} // static createHivePartitioningFunction()
// -----------------------------------------------------------------------
// createNodeMap()
// This method is used for creating a node map for all DP2 partitions
// associated with this table or index.
// -----------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
static void createNodeMap (desc_struct* part_desc_list,
NodeMap* nodeMap,
NAMemory* heap,
char * tableName,
Int32 tableIdent)
{
// ---------------------------------------------------------------------
// Loop over all partitions creating a DP2 node map entry for each
// partition.
// ---------------------------------------------------------------------
desc_struct* partns_desc = part_desc_list;
CollIndex currentPartition = 0;
if(NOT partns_desc)
{
NodeMapEntry entry =
NodeMapEntry(tableName,NULL,heap,tableIdent);
nodeMap->setNodeMapEntry(currentPartition,entry,heap);
}
else{
while (partns_desc)
{
NodeMapEntry entry(partns_desc->body.partns_desc.partitionname,
partns_desc->body.partns_desc.givenname,
heap,tableIdent);
nodeMap->setNodeMapEntry(currentPartition,entry,heap);
partns_desc = partns_desc->header.next;
currentPartition++;
}
}
// -------------------------------------------------------------------
// If no partitions supplied, create a single partition node map with
// a dummy entry.
// -------------------------------------------------------------------
if (nodeMap->getNumEntries() == 0)
{
NodeMapEntry entry(NodeMapEntry::ACTIVE);
nodeMap->setNodeMapEntry(0,entry,heap);
}
// -------------------------------------------------------------------
// Set the tableIdent into the nodemap itself.
// -------------------------------------------------------------------
nodeMap->setTableIdent(tableIdent);
// -----------------------------------------------------------------------
// See if we need to build a bogus node map with fake volume assignments.
// This will allow us to fake costing code into believing that all
// partitions are distributed evenly among SMP nodes in the cluster.
// -----------------------------------------------------------------------
if (CmpCommon::getDefault(FAKE_VOLUME_ASSIGNMENTS) == DF_ON)
{
// --------------------------------------------------------------------
// Extract number of SMP nodes in the cluster from the defaults table.
// --------------------------------------------------------------------
NADefaults &defs = ActiveSchemaDB()->getDefaults();
CollIndex numOfSMPs = gpClusterInfo->numOfSMPs();
if(CURRSTMT_OPTDEFAULTS->isFakeHardware())
{
numOfSMPs = defs.getAsLong(DEF_NUM_NODES_IN_ACTIVE_CLUSTERS);
}
// ------------------------------------------------------------------
// Determine how many node map entries will be assigned a particular
// node, and also calculate if there are any remaining entries.
// ------------------------------------------------------------------
CollIndex entriesPerNode = nodeMap->getNumEntries() / numOfSMPs;
CollIndex entriesRemaining = nodeMap->getNumEntries() % numOfSMPs;
// ----------------------------------------------------------------
// Assign each node to consecutive entries such that each node has
// approximately the same number of entries.
//
// Any extra entries get assigned evenly to the last remaining
// nodes. For example, if the cluster has 5 nodes and the node map
// has 23 entries, we would assign nodes to entries as follows:
//
// Entries 0 - 3 to node 0. (4 entries)
// Entries 4 - 7 to node 1. (4 entries)
// Entries 8 - 12 to node 2. (5 entries)
// Entries 13 - 17 to node 3. (5 entries)
// Entries 18 - 22 to node 4. (5 entries)
// ----------------------------------------------------------------
CollIndex mapIdx = 0;
for (CollIndex nodeIdx = 0; nodeIdx < numOfSMPs; nodeIdx++)
{
if (nodeIdx == numOfSMPs - entriesRemaining)
{
entriesPerNode += 1;
}
for (CollIndex entryIdx = 0; entryIdx < entriesPerNode; entryIdx++)
{
nodeMap->setNodeNumber(mapIdx,nodeIdx);
mapIdx += 1;
}
}
}
} // static createNodeMap()
#pragma warn(1506) // warning elimination
#pragma nowarn(1506) // warning elimination
static void createNodeMap (hive_tbl_desc* hvt_desc,
NodeMap* nodeMap,
NAMemory* heap,
char * tableName,
Int32 tableIdent)
{
// ---------------------------------------------------------------------
// Loop over all hive storage descriptors (partition files), creating a
// node map entry for each partition.
// ---------------------------------------------------------------------
CMPASSERT(nodeMap->type() == NodeMap::HIVE);
hive_sd_desc* sd_desc = hvt_desc->getSDs();
CollIndex currentPartition = 0;
// char buf[500];
Int32 i= 0;
while (sd_desc)
{
HiveNodeMapEntry entry(NodeMapEntry::ACTIVE, heap);
nodeMap->setNodeMapEntry(currentPartition++,entry,heap);
sd_desc = sd_desc->next_;
}
// -------------------------------------------------------------------
// If no partitions supplied, create a single partition node map with
// a dummy entry.
// -------------------------------------------------------------------
if (nodeMap->getNumEntries() == 0)
{
HiveNodeMapEntry entry(NodeMapEntry::ACTIVE, heap);
nodeMap->setNodeMapEntry(0,entry,heap);
}
// -------------------------------------------------------------------
// Set the tableIdent into the nodemap itself.
// -------------------------------------------------------------------
nodeMap->setTableIdent(tableIdent);
// No fake volume assignment because Hive's partitions are not hash
// based; there is no balancing of data among all partitions.
} // static createNodeMap()
#pragma warn(1506) // warning elimination
//-------------------------------------------------------------------------
// This function checks if a table/index or any of its partitions are
// remote. This is required to determine the size of the EidRootBuffer
// to be sent to DP2 - Expand places limits on the size of messages
// - approx 31000 for messages to remote nodes, and 56000 for messages
// on the local node.
//-------------------------------------------------------------------------
#pragma nowarn(262) // warning elimination
static NABoolean checkRemote(desc_struct* part_desc_list,
char * tableName)
{
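// Currently this always returns TRUE, i.e., we conservatively treat
// every table/index as if it had remote partitions.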
return TRUE;
}
#pragma warn(262) // warning elimination
// warning elimination (removed "inline")
static NAString makeTableName(const NATable *table,
const columns_desc_struct *column_desc)
{
return NAString(
table ?
table->getTableName().getQualifiedNameAsAnsiString().data() :
column_desc->tablename ?
column_desc->tablename : "");
}
// warning elimination (removed "inline")
static NAString makeColumnName(const NATable *table,
const columns_desc_struct *column_desc)
{
NAString nam(makeTableName(table, column_desc));
if (!nam.isNull()) nam += ".";
nam += column_desc->colname;
return nam;
}
// -----------------------------------------------------------------------
// Method for creating NAType from desc_struct.
// -----------------------------------------------------------------------
NABoolean createNAType(columns_desc_struct *column_desc /*IN*/,
const NATable *table /*IN*/,
NAType *&type /*OUT*/,
NAMemory *heap /*IN*/,
Lng32 * errorCode
)
{
//
// Compute the NAType for this column
//
#define REC_INTERVAL REC_MIN_INTERVAL
DataType datatype = column_desc->datatype;
if (REC_MIN_INTERVAL <= datatype && datatype <= REC_MAX_INTERVAL)
datatype = REC_INTERVAL;
Lng32 charCount = column_desc->length;
if ( DFS2REC::isAnyCharacter(column_desc->datatype) )
{
if ( CharInfo::isCharSetSupported(column_desc->character_set) == FALSE ) {
if (!errorCode)
{
*CmpCommon::diags() << DgSqlCode(-4082)
<< DgTableName(makeTableName(table, column_desc));
}
else
{
*errorCode = 4082;
}
return TRUE; // error
}
if ( CharInfo::is_NCHAR_MP(column_desc->character_set) )
charCount /= SQL_DBCHAR_SIZE;
}
switch (datatype)
{
case REC_BPINT_UNSIGNED :
type = new (heap)
SQLBPInt(column_desc->precision, column_desc->null_flag, FALSE, heap);
break;
case REC_BIN16_SIGNED:
if (column_desc->precision > 0)
type = new (heap)
SQLNumeric(column_desc->length,
column_desc->precision,
column_desc->scale,
TRUE,
column_desc->null_flag,
heap
);
else
type = new (heap)
SQLSmall(TRUE,
column_desc->null_flag,
heap
);
break;
case REC_BIN16_UNSIGNED:
if (column_desc->precision > 0)
type = new (heap)
SQLNumeric(column_desc->length,
column_desc->precision,
column_desc->scale,
FALSE,
column_desc->null_flag,
heap
);
else
type = new (heap)
SQLSmall(FALSE,
column_desc->null_flag,
heap
);
break;
case REC_BIN32_SIGNED:
if (column_desc->precision > 0)
type = new (heap)
SQLNumeric(column_desc->length,
column_desc->precision,
column_desc->scale,
TRUE,
column_desc->null_flag,
heap
);
else
type = new (heap)
SQLInt(TRUE,
column_desc->null_flag,
heap
);
break;
case REC_BIN32_UNSIGNED:
if (column_desc->precision > 0)
type = new (heap)
SQLNumeric(column_desc->length,
column_desc->precision,
column_desc->scale,
FALSE,
column_desc->null_flag,
heap
);
else
type = new (heap)
SQLInt(FALSE,
column_desc->null_flag,
heap
);
break;
case REC_BIN64_SIGNED:
if (column_desc->precision > 0)
type = new (heap)
SQLNumeric(column_desc->length,
column_desc->precision,
column_desc->scale,
TRUE,
column_desc->null_flag,
heap
);
else
type = new (heap)
SQLLargeInt(TRUE,
column_desc->null_flag,
heap
);
break;
case REC_DECIMAL_UNSIGNED:
type = new (heap)
SQLDecimal(column_desc->length,
column_desc->scale,
FALSE,
column_desc->null_flag,
heap
);
break;
case REC_DECIMAL_LSE:
type = new (heap)
SQLDecimal(column_desc->length,
column_desc->scale,
TRUE,
column_desc->null_flag,
heap
);
break;
case REC_NUM_BIG_UNSIGNED:
type = new (heap)
SQLBigNum(column_desc->precision,
column_desc->scale,
TRUE, // is a real bignum
FALSE,
column_desc->null_flag,
heap
);
break;
case REC_NUM_BIG_SIGNED:
type = new (heap)
SQLBigNum(column_desc->precision,
column_desc->scale,
TRUE, // is a real bignum
TRUE,
column_desc->null_flag,
heap
);
break;
case REC_TDM_FLOAT32:
type = new (heap)
SQLRealTdm(column_desc->null_flag, heap, column_desc->precision);
break;
case REC_TDM_FLOAT64:
type = new (heap)
SQLDoublePrecisionTdm(column_desc->null_flag, heap, column_desc->precision);
break;
case REC_FLOAT32:
type = new (heap)
SQLReal(column_desc->null_flag, heap, column_desc->precision);
break;
case REC_FLOAT64:
type = new (heap)
SQLDoublePrecision(column_desc->null_flag, heap, column_desc->precision);
break;
case REC_BYTE_F_DOUBLE:
charCount /= SQL_DBCHAR_SIZE; // divide the storage length by 2
type = new (heap)
SQLChar(charCount,
column_desc->null_flag,
column_desc->upshift,
column_desc->caseinsensitive,
FALSE,
column_desc->character_set,
column_desc->collation_sequence,
CharInfo::IMPLICIT
);
break;
case REC_BYTE_F_ASCII:
if (column_desc->character_set == CharInfo::UTF8 ||
(column_desc->character_set == CharInfo::SJIS &&
column_desc->encoding_charset == CharInfo::SJIS))
{
Lng32 maxBytesPerChar = CharInfo::maxBytesPerChar(column_desc->character_set);
Lng32 sizeInChars = charCount ; // Applies when CharLenUnit == BYTES
if ( column_desc->precision > 0 )
sizeInChars = column_desc->precision;
type = new (heap)
SQLChar(CharLenInfo(sizeInChars, charCount/*in_bytes*/),
column_desc->null_flag,
column_desc->upshift,
column_desc->caseinsensitive,
FALSE, // varLenFlag
column_desc->character_set,
column_desc->collation_sequence,
CharInfo::IMPLICIT, // Coercibility
column_desc->encoding_charset
);
}
else // keep the old behavior
type = new (heap)
SQLChar(charCount,
column_desc->null_flag,
column_desc->upshift,
column_desc->caseinsensitive,
FALSE,
column_desc->character_set,
column_desc->collation_sequence,
CharInfo::IMPLICIT
);
break;
case REC_BYTE_V_DOUBLE:
charCount /= SQL_DBCHAR_SIZE; // divide the storage length by 2
// fall thru
case REC_BYTE_V_ASCII:
if (column_desc->character_set == CharInfo::SJIS ||
column_desc->character_set == CharInfo::UTF8)
{
Lng32 maxBytesPerChar = CharInfo::maxBytesPerChar(column_desc->character_set);
Lng32 sizeInChars = charCount ; // Applies when CharLenUnit == BYTES
if ( column_desc->precision > 0 )
sizeInChars = column_desc->precision;
type = new (heap)
SQLVarChar(CharLenInfo(sizeInChars, charCount/*in_bytes*/),
column_desc->null_flag,
column_desc->upshift,
column_desc->caseinsensitive,
column_desc->character_set,
column_desc->collation_sequence,
CharInfo::IMPLICIT, // Coercibility
column_desc->encoding_charset
);
}
else // keep the old behavior
type = new (heap)
SQLVarChar(charCount,
column_desc->null_flag,
column_desc->upshift,
column_desc->caseinsensitive,
column_desc->character_set,
column_desc->collation_sequence,
CharInfo::IMPLICIT
);
break;
case REC_BYTE_V_ASCII_LONG:
type = new (heap)
SQLLongVarChar(charCount,
FALSE,
column_desc->null_flag,
column_desc->upshift,
column_desc->caseinsensitive,
column_desc->character_set,
column_desc->collation_sequence,
CharInfo::IMPLICIT
);
break;
case REC_DATETIME:
type = DatetimeType::constructSubtype(
column_desc->null_flag,
column_desc->datetimestart,
column_desc->datetimeend,
column_desc->datetimefractprec,
heap
);
CMPASSERT(type);
if (!type->isSupportedType())
{
column_desc->defaultClass = COM_NO_DEFAULT; // can't set a default for these, either.
// 4030 Column is an unsupported combination of datetime fields
if (!errorCode)
{
*CmpCommon::diags() << DgSqlCode(4030)
<< DgColumnName(makeColumnName(table, column_desc))
<< DgInt0(column_desc->datetimestart)
<< DgInt1(column_desc->datetimeend)
<< DgInt2(column_desc->datetimefractprec);
}
else
{
*errorCode = 4030;
}
}
break;
case REC_INTERVAL:
type = new (heap)
SQLInterval(column_desc->null_flag,
column_desc->datetimestart,
column_desc->intervalleadingprec,
column_desc->datetimeend,
column_desc->datetimefractprec,
heap
);
CMPASSERT(type);
if (! ((SQLInterval *)type)->checkValid(CmpCommon::diags()))
return TRUE; // error
if (!type->isSupportedType())
{
column_desc->defaultClass = COM_NO_DEFAULT; // can't set a default for these, either.
if (!errorCode)
*CmpCommon::diags() << DgSqlCode(3044) << DgString0(column_desc->colname);
else
*errorCode = 3044;
}
break;
case REC_BLOB :
type = new (heap)
SQLBlob(column_desc->precision,Lob_Invalid_Storage,
column_desc->null_flag);
break;
case REC_CLOB :
type = new (heap)
SQLClob(column_desc->precision,Lob_Invalid_Storage,
column_desc->null_flag);
break;
default:
{
// 4031 Column %s is an unknown data type, %d.
if (!errorCode)
{
*CmpCommon::diags() << DgSqlCode(-4031)
<< DgColumnName(makeColumnName(table, column_desc))
<< DgInt0(column_desc->datatype);
}
else
{
*errorCode = 4031;
}
return TRUE; // error
}
} // end switch (column_desc->datatype)
CMPASSERT(type);
if (type->getTypeQualifier() == NA_CHARACTER_TYPE) {
CharInfo::Collation co = ((CharType *)type)->getCollation();
// a "mini-cache" to avoid a procedure call, for performance
static THREAD_P CharInfo::Collation cachedCO = CharInfo::UNKNOWN_COLLATION;
static THREAD_P Int32 cachedFlags = CollationInfo::ALL_NEGATIVE_SYNTAX_FLAGS;
if (cachedCO != co) {
cachedCO = co;
cachedFlags = CharInfo::getCollationFlags(co);
}
if (cachedFlags & CollationInfo::ALL_NEGATIVE_SYNTAX_FLAGS) {
//
//## The NCHAR/COLLATE NSK-Rel1 project is forced to disallow all user-
// defined collations here. What we can't handle is:
// - full support! knowledge of how to really collate!
// - safe predicate-ability of collated columns, namely
// . ORDER/SEQUENCE/SORT BY
// MIN/MAX
// < <= > >=
// These *would* have been disallowed by the
// CollationInfo::ORDERED_CMP_ILLEGAL flag.
// . DISTINCT
// GROUP BY
// = <>
// These *would* have been disallowed by the
// CollationInfo::EQ_NE_CMP_ILLEGAL flag.
// . SELECTing a collated column which is a table or index key
// We *would* have done full table scan only, based on flag
// . INS/UPD/DEL involving a collated column which is a key
// We *would* have had to disallow this, based on flag;
// otherwise we would position in wrong and corrupt either
// our partitioning or b-trees or both.
// See the "MPcollate.doc" document, and
// see sqlcomp/DefaultValidator.cpp ValidateCollationList comments.
//
{
// 4069 Column TBL.COL uses unsupported collation COLLAT.
if (!errorCode)
{
*CmpCommon::diags() << DgSqlCode(-4069)
<< DgColumnName(makeColumnName(table, column_desc));
}
else
{
*errorCode= 4069;
}
return TRUE; // error
}
}
}
return FALSE; // no error
} // createNAType()
// -----------------------------------------------------------------------
// Method for inserting new NAColumn entries in NATable::colArray_,
// one for each column_desc in the list supplied as input.
// -----------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
NABoolean createNAColumns(desc_struct *column_desc_list /*IN*/,
NATable *table /*IN*/,
NAColumnArray &colArray /*OUT*/,
NAMemory *heap /*IN*/)
{
NAType *type;
ColumnClass colClass;
while (column_desc_list)
{
columns_desc_struct * column_desc = &column_desc_list->body.columns_desc;
NABoolean isMvSystemAdded = FALSE;
NABoolean hasSystemColumnAsUserColumn = FALSE;
if (NAColumn::createNAType(column_desc, table, type, heap))
return TRUE;
// Get the column class. The column will either be a system column or a
// user column.
//
switch (column_desc->colclass)
{
case 'S':
{
if ( (CmpCommon::getDefault(OVERRIDE_SYSKEY)==DF_ON) &&
(table && table->getSpecialType() != ExtendedQualName::VIRTUAL_TABLE) )
{
colClass = USER_COLUMN;
hasSystemColumnAsUserColumn = TRUE;
}
else
colClass = SYSTEM_COLUMN;
}
break;
case 'U':
colClass = USER_COLUMN;
break;
case 'A':
case 'C':
colClass = USER_COLUMN;
break;
case 'M': // MVs --
colClass = USER_COLUMN;
isMvSystemAdded = TRUE;
break;
default:
{
// 4032 column is an unknown class (not sys nor user)
*CmpCommon::diags() << DgSqlCode(-4032)
<< DgColumnName(makeColumnName(table, column_desc))
<< DgInt0(column_desc->colclass);
return TRUE; // error
}
} // end switch (column_desc->colclass)
// Create an NAColumn and insert it into the NAColumn array.
//
NAColumn *newColumn = NULL;
if (column_desc->colname[0] != '\0')
{
// Standard named column from ReadTableDef...
CMPASSERT(column_desc->colnumber >= 0);
char* defaultValue = column_desc->defaultvalue;
char* heading = column_desc->heading;
char* computed_column_text = column_desc->computed_column_text;
NABoolean isSaltColumn = FALSE;
NABoolean isDivisioningColumn = FALSE;
if (column_desc->defaultClass == COM_ALWAYS_COMPUTE_COMPUTED_COLUMN_DEFAULT)
{
if (column_desc->colFlags & SEABASE_COLUMN_IS_SALT)
isSaltColumn = TRUE;
if (column_desc->colFlags & SEABASE_COLUMN_IS_DIVISION)
isDivisioningColumn = TRUE;
if (!computed_column_text)
{
computed_column_text = defaultValue;
defaultValue = NULL;
}
}
if(ActiveSchemaDB()->getNATableDB()->cachingMetaData()){
//make copies of stuff onto the heap passed in
if(defaultValue){
defaultValue = (char*) new (heap) char[strlen(defaultValue)+1];
strcpy(defaultValue, column_desc->defaultvalue);
}
if(heading){
Int32 headingLength = str_len(heading)+1;
heading = new (heap) char [headingLength];
memcpy(heading,column_desc->heading,headingLength);
}
if(computed_column_text){
char * computed_column_text_temp = computed_column_text;
Int32 cctLength = str_len(computed_column_text)+1;
computed_column_text = new (heap) char [cctLength];
memcpy(computed_column_text,computed_column_text_temp,cctLength);
}
}
newColumn = new (heap)
NAColumn(column_desc->colname,
column_desc->colnumber,
type,
heap,
table,
colClass,
column_desc->defaultClass,
defaultValue,
heading,
column_desc->upshift,
((column_desc->colclass == 'A') ||
(column_desc->colclass == 'C')),
COM_UNKNOWN_DIRECTION,
FALSE,
NULL,
column_desc->stored_on_disk,
computed_column_text,
isSaltColumn,
isDivisioningColumn,
(column_desc->colclass == 'C'));
}
else
{
CMPASSERT(0);
}
if (isMvSystemAdded)
newColumn->setMvSystemAddedColumn();
if (table &&
((table->isSeabaseTable()) ||
(table->isHbaseCellTable()) ||
(table->isHbaseRowTable())))
{
if (column_desc->hbaseColFam)
newColumn->setHbaseColFam(column_desc->hbaseColFam);
if (column_desc->hbaseColQual)
newColumn->setHbaseColQual(column_desc->hbaseColQual);
newColumn->setHbaseColFlags(column_desc->hbaseColFlags);
}
if (table != NULL)
{
if (newColumn->isAddedColumn())
table->setHasAddedColumn(TRUE);
if (newColumn->getType()->isVaryingLen())
table->setHasVarcharColumn(TRUE);
if (hasSystemColumnAsUserColumn)
table->setSystemColumnUsedAsUserColumn(TRUE) ;
if (newColumn->getType()->isLob())
table->setHasLobColumn(TRUE);
if (CmpSeabaseDDL::isEncodingNeededForSerialization(newColumn))
table->setHasSerializedEncodedColumn(TRUE);
if (CmpSeabaseDDL::isSerialized(newColumn->getHbaseColFlags()))
table->setHasSerializedColumn(TRUE);
}
colArray.insert(newColumn);
column_desc_list = column_desc_list->header.next;
} // end while
return FALSE; // no error
} // createNAColumns()
#pragma warn(1506) // warning elimination
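// Map a Hive column type name to the corresponding SQL NAType.
// Returns NULL for an unsupported Hive type; the caller reports error 1204.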
NAType* getSQColTypeForHive(const char* hiveType, NAMemory* heap)
{
if ( !strcmp(hiveType, "tinyint") ||
!strcmp(hiveType, "smallint"))
return new (heap) SQLSmall(TRUE /* neg */, TRUE /* allow NULL*/, heap);
if ( !strcmp(hiveType, "int"))
return new (heap) SQLInt(TRUE /* neg */, TRUE /* allow NULL*/, heap);
if ( !strcmp(hiveType, "bigint"))
return new (heap) SQLLargeInt(TRUE /* neg */, TRUE /* allow NULL*/, heap);
if ( !strcmp(hiveType, "string"))
{
Int32 len = CmpCommon::getDefaultLong(HIVE_MAX_STRING_LENGTH);
NAString hiveCharset =
ActiveSchemaDB()->getDefaults().getValue(HIVE_DEFAULT_CHARSET);
return new (heap) SQLVarChar(CharLenInfo((hiveCharset == CharInfo::UTF8 ? 0 : len),len),
TRUE, // allow NULL
FALSE, // not upshifted
FALSE, // not case-insensitive
CharInfo::getCharSetEnum(hiveCharset),
CharInfo::DefaultCollation,
CharInfo::IMPLICIT);
}
if ( !strcmp(hiveType, "float"))
return new (heap) SQLReal(TRUE /* allow NULL*/, heap);
if ( !strcmp(hiveType, "double"))
return new (heap) SQLDoublePrecision(TRUE /* allow NULL*/, heap);
if ( !strcmp(hiveType, "timestamp"))
return new (heap) SQLTimestamp(TRUE /* allow NULL */ , 6, heap);
return NULL;
}
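// Build the NAColumn array for a Hive table from its hive_column_desc list.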
NABoolean createNAColumns(struct hive_column_desc* hcolumn /*IN*/,
NATable *table /*IN*/,
NAColumnArray &colArray /*OUT*/,
NAMemory *heap /*IN*/)
{
// Assume that hive_struct->conn has the right connection,
// and that tblID and sdID have been properly set.
// In the following loop, we need to extract the column information.
while (hcolumn) {
NAType* natype = getSQColTypeForHive(hcolumn->type_, heap);
if ( !natype ) {
*CmpCommon::diags()
<< DgSqlCode(-1204)
<< DgString0(hcolumn->type_);
return TRUE;
}
NAString colName(hcolumn->name_);
colName.toUpper();
NAColumn* newColumn = new (heap)
NAColumn(colName.data(),
hcolumn->intIndex_,
natype,
heap,
table,
USER_COLUMN, // colClass,
COM_NULL_DEFAULT ,//defaultClass,
(char*)"", // defaultValue,
(char*)"", // heading,
FALSE, // column_desc->upshift,
FALSE, // added column
COM_UNKNOWN_DIRECTION,
FALSE, // isOptional
NULL, // routineParamType
TRUE, // column_desc->stored_on_disk,
(char*)"" //computed_column_text
);
if (table != NULL)
{
if (newColumn->isAddedColumn())
table->setHasAddedColumn(TRUE);
if (newColumn->getType()->isVaryingLen())
table->setHasVarcharColumn(TRUE);
}
colArray.insert(newColumn);
hcolumn= hcolumn->next_;
}
return FALSE; // no error
} // createNAColumns()
#pragma warn(1506) // warning elimination
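// Build the clustering key column array from a keys_desc list: insert each
// referenced column, record its ASC/DESC ordering, and mark it as part of
// the clustering key.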
NABoolean createNAKeyColumns(desc_struct *keys_desc_list /*IN*/,
NAColumnArray &colArray /*IN*/,
NAColumnArray &keyColArray /*OUT*/,
CollHeap *heap /*IN*/)
{
const desc_struct *keys_desc = keys_desc_list;
while (keys_desc)
{
Int32 tablecolnumber = keys_desc->body.keys_desc.tablecolnumber;
NAColumn *indexColumn = colArray.getColumn(tablecolnumber);
SortOrdering order = NOT_ORDERED;
keyColArray.insert(indexColumn);
order = keys_desc->body.keys_desc.ordering ? DESCENDING : ASCENDING;
keyColArray.setAscending(keyColArray.entries()-1, order == ASCENDING);
// Remember that this column is part of the clustering
// key and remember its key ordering (asc or desc)
indexColumn->setClusteringKey(order);
keys_desc = keys_desc->header.next;
} // end while (keys_desc)
return FALSE;
}
// ************************************************************************
// The next two methods are used for code related to indexes hiding.
// In particular, this is related to hiding remote indexes having the same
// name as the local name. Here we mark the remote indexes that have the
// same local name and in addition share the following:
// (1) both share the same index columns
// (2) both have the same partitioning keys
//
// The method naStringHashFunc is used by the NAHashDictionary<NAString, Index>
// that maps indexname to the corresponding list of indexes having that name
//
//*************************************************************************
ULng32 naStringHashFunc(const NAString& indexName)
{
ULng32 hash= (ULng32) NAString::hash(indexName);
return hash;
}
//*************************************************************************
// The method processDuplicateNames() is called by createNAFileSet() for
// tables having duplicate remote indexes.
//*************************************************************************
// LCOV_EXCL_START :nsk
void processDuplicateNames(NAHashDictionaryIterator<NAString, Int32> &Iter,
NAFileSetList & indexes,
char *localNodeName)
{
return;
} // processDuplicateNames()
// LCOV_EXCL_STOP
// -----------------------------------------------------------------------
// Method for:
// - inserting new NAFileSet entries in NATable::indexes_
// one for each index in the list supplied as input. It also
// returns a pointer to the NAFileSet for the clustering index
// as well as the primary index on this NATable.
// - inserting new NAFileSet entries in NATable::vertParts_
// one for each vertical partition in the list supplied as input.
// -----------------------------------------------------------------------
#pragma nowarn(1506) // warning elimination
static
NABoolean createNAFileSets(desc_struct * table_desc /*IN*/,
const NATable * table /*IN*/,
const NAColumnArray & colArray /*IN*/,
NAFileSetList & indexes /*OUT*/,
NAFileSetList & vertParts /*OUT*/,
NAFileSet * & clusteringIndex /*OUT*/,
LIST(CollIndex) & tableIdList /*OUT*/,
NAMemory* heap,
BindWA * bindWA,
NAColumnArray &newColumns, /*OUT */
Int32 *maxIndexLevelsPtr = NULL)
{
// ---------------------------------------------------------------------
// Add index/vertical partition (VP) information; loop over all indexes/
// VPs, but start with the clustering key, then process all others.
// The clustering key has a keytag 0.
// ---------------------------------------------------------------------
// this dictionary is used for hiding remote indexes; the remote indexes
// are hidden when the CQD INDEX_ELIMINATION_LEVEL is set to aggressive
NAHashDictionary<NAString, Int32> *indexFilesetMap =
new (heap) NAHashDictionary<NAString, Int32>
(naStringHashFunc, 101, FALSE, CmpCommon::statementHeap());
NAList<NAString *> stringList (CmpCommon::statementHeap());
desc_struct *indexes_desc = table_desc->body.table_desc.indexes_desc;
while (indexes_desc AND indexes_desc->body.indexes_desc.keytag)
indexes_desc = indexes_desc->header.next;
// must have a clustering key if not view
CMPASSERT((indexes_desc AND !indexes_desc->body.indexes_desc.keytag) OR
(table_desc->body.table_desc.views_desc));
NABoolean isTheClusteringKey = TRUE;
NABoolean isVerticalPartition;
NABoolean hasRemotePartition = FALSE;
CollIndex numClusteringKeyColumns = 0;
NABoolean tableAlignedRowFormat = table->isSQLMXAlignedTable();
// Get the HBase table index level and block size. The costing code uses the
// index level and block size to estimate cost. Here we make a JNI call to read
// the index level and block size. If there is a need to avoid reading from the
// HBase layer, the HBASE_INDEX_LEVEL CQD can be used to disable the JNI call;
// the user can set this CQD to reflect the desired index level for the query.
// The default value of HBASE_BLOCK_SIZE is 64KB when not reading from the HBase layer.
Int32 hbtIndexLevels = 0;
Int32 hbtBlockSize = 0;
NABoolean res = false;
if (table->isHbaseTable())
{
// get default values of index_level and block size
hbtIndexLevels = (ActiveSchemaDB()->getDefaults()).getAsLong(HBASE_INDEX_LEVEL);
hbtBlockSize = (ActiveSchemaDB()->getDefaults()).getAsLong(HBASE_BLOCK_SIZE);
// call getHbaseTableInfo if index level is set to 0
if (hbtIndexLevels == 0)
res = table->getHbaseTableInfo(hbtIndexLevels, hbtBlockSize);
}
// Set up global cluster information. This global information always
// gets put on the context heap.
//
// Note that this function call will probably not do anything, since
// this cluster information is set up when arkcmp is created; however,
// it's certainly better to have this call here, rather than in a
// doubly-nested loop below where it used to be ...
// $$$ probably not necessary to call this even once ...
setUpClusterInfo(CmpCommon::contextHeap());
NABoolean doHash2 =
(CmpCommon::getDefault(HBASE_HASH2_PARTITIONING) != DF_OFF &&
!(bindWA && bindWA->isTrafLoadPrep()));
// ---------------------------------------------------------------------
// loop over all indexes/VPs defined on the base table
// ---------------------------------------------------------------------
while (indexes_desc)
{
Lng32 numberOfFiles = 1; // always at least 1
NAColumn * indexColumn; // an index/VP key column
NAColumn * newIndexColumn;
NAFileSet * newIndex; // a new file set
//hardcoding statement heap here; previously the following calls
//used the heap that was passed in (which was always the statement heap).
//Now with the introduction of NATable caching we pass in the NATable
//heap, and these arrays should not be created on the NATable heap; they
//should be created on the statement heap. Only the array objects
//will be on the statement heap; whatever is in the arrays, i.e. NAColumns,
//will still be wherever they were before.
NAColumnArray allColumns(CmpCommon::statementHeap());// all columns that belong to an index
NAColumnArray indexKeyColumns(CmpCommon::statementHeap());// the index key columns
NAColumnArray saveNAColumns(CmpCommon::statementHeap());// save NAColums of secondary index columns
NAColumnArray partitioningKeyColumns(CmpCommon::statementHeap());// the partitioning key columns
PartitioningFunction * partFunc = NULL;
// is this an index or is it really a VP?
isVerticalPartition = indexes_desc->body.indexes_desc.isVerticalPartition;
NABoolean isPacked = indexes_desc->body.indexes_desc.isPacked;
NABoolean indexAlignedRowFormat = (indexes_desc->body.indexes_desc.rowFormat == COM_ALIGNED_FORMAT_TYPE);
NABoolean isNotAvailable =
indexes_desc->body.indexes_desc.notAvailable;
ItemExprList hbaseSaltColumnList(CmpCommon::statementHeap());
Int64 numOfSaltedPartitions = 0;
// ---------------------------------------------------------------------
// loop over the clustering key columns of the index
// ---------------------------------------------------------------------
const desc_struct *keys_desc = indexes_desc->body.indexes_desc.keys_desc;
while (keys_desc)
{
// Add an index/VP key column.
//
// If this is an alternate index or a VP, the keys table actually
// describes all columns of the index or VP. For nonunique
// indexes, all index columns form the key, while for unique
// alternate indexes the last "numClusteringKeyColumns"
// columns are non-key columns, they are just the clustering
// key columns used to find the base table record. This is
// true for both SQL/MP and SQL/MX tables at this time.
// To make these assumptions is not optimal, but the
// desc_structs that are used as input are a historical
// leftover from SQL/MP and therefore aren't set up very
// well to describe index columns and index keys. Some day
// we might consider a direct conversion from the MX catalog
// manager (SOL) objects into NATables and NAFilesets.
//
// NOTE:
// The last "numClusteringKeyColumns" key columns
// of a unique alternate index (which ARE described in the
// keys_desc) get deleted later.
Int32 tablecolnumber = keys_desc->body.keys_desc.tablecolnumber;
indexColumn = colArray.getColumn(tablecolnumber);
if ((table->isHbaseTable()) &&
((indexes_desc->body.indexes_desc.keytag != 0) ||
(indexAlignedRowFormat && indexAlignedRowFormat != tableAlignedRowFormat)))
{
newIndexColumn = new(heap) NAColumn(*indexColumn);
newIndexColumn->setIndexColName(keys_desc->body.keys_desc.keyname);
newIndexColumn->setHbaseColFam(keys_desc->body.keys_desc.hbaseColFam);
newIndexColumn->setHbaseColQual(keys_desc->body.keys_desc.hbaseColQual);
newIndexColumn->resetSerialization();
saveNAColumns.insert(indexColumn);
newColumns.insert(newIndexColumn);
indexColumn = newIndexColumn;
}
SortOrdering order = NOT_ORDERED;
// as mentioned above, for all alternate indexes we
// assume at first that all columns are key columns
// and we make adjustments later
indexKeyColumns.insert(indexColumn);
order = keys_desc->body.keys_desc.ordering ?
DESCENDING : ASCENDING;
indexKeyColumns.setAscending(indexKeyColumns.entries() - 1,
order == ASCENDING);
if ( table->isHbaseTable() &&
indexColumn->isSaltColumn() )
{
// examples of the saltClause string:
// 1. HASH2PARTFUNC(CAST(L_ORDERKEY AS INT NOT NULL) FOR 4)
// 2. HASH2PARTFUNC(CAST(A AS INT NOT NULL),CAST(B AS INT NOT NULL) FOR 4)
const char* saltClause = indexColumn->getComputedColumnExprString();
Parser parser(CmpCommon::context());
ItemExpr* saltExpr = parser.getItemExprTree(saltClause,
strlen(saltClause),
CharInfo::ISO88591);
CMPASSERT(saltExpr &&
saltExpr->getOperatorType() == ITM_HASH2_DISTRIB);
// get the # of salted partitions from saltClause
ItemExprList csList(CmpCommon::statementHeap());
saltExpr->findAll(ITM_CONSTANT, csList, FALSE, FALSE);
// get #salted partitions from last ConstValue in the list
if ( csList.entries() > 0 ) {
ConstValue* ct = (ConstValue*)csList[csList.entries()-1];
if ( ct->canGetExactNumericValue() ) {
numOfSaltedPartitions = ct->getExactNumericValue();
}
}
// collect all ColReference objects into hbaseSaltColumnList.
saltExpr->findAll(ITM_REFERENCE, hbaseSaltColumnList, FALSE, FALSE);
}
if (isTheClusteringKey)
{
// Since many columns of the base table may not be in the
// clustering key, we'll delay setting up the list of all
// columns in the index until later, so we can then just
// add them all at once.
// Remember that this column is part of the clustering
// key and remember its key ordering (asc or desc)
indexColumn->setClusteringKey(order);
numClusteringKeyColumns++;
}
else
{
// Since all columns in the index are guaranteed to be in
// the key, we can set up the list of all columns in the index
// now just by adding every key column.
allColumns.insert(indexColumn);
}
keys_desc = keys_desc->header.next;
} // end while (keys_desc)
// ---------------------------------------------------------------------
// Loop over the non key columns of the index/vertical partition.
// These columns get added to the list of all the columns for the index/
// VP. Their length also contributes to the total record length.
// ---------------------------------------------------------------------
const desc_struct *non_keys_desc =
indexes_desc->body.indexes_desc.non_keys_desc;
while (non_keys_desc)
{
Int32 tablecolnumber = non_keys_desc->body.keys_desc.tablecolnumber;
indexColumn = colArray.getColumn(tablecolnumber);
if ((table->isHbaseTable()) &&
((indexes_desc->body.indexes_desc.keytag != 0) ||
(indexAlignedRowFormat && indexAlignedRowFormat != tableAlignedRowFormat)))
{
newIndexColumn = new(heap) NAColumn(*indexColumn);
if (non_keys_desc->body.keys_desc.keyname)
newIndexColumn->setIndexColName(non_keys_desc->body.keys_desc.keyname);
newIndexColumn->setHbaseColFam(non_keys_desc->body.keys_desc.hbaseColFam);
newIndexColumn->setHbaseColQual(non_keys_desc->body.keys_desc.hbaseColQual);
newIndexColumn->resetSerialization();
indexColumn = newIndexColumn;
newColumns.insert(newIndexColumn);
}
allColumns.insert(indexColumn);
non_keys_desc = non_keys_desc->header.next;
} // end while (non_keys_desc)
desc_struct *files_desc;
NABoolean isSystemTable;
if (isTheClusteringKey)
{
// We haven't set up the list of all columns in the clustering
// index yet, so do that now. Do this by adding all
// the base table columns to the columns of the clustering index.
// Don't add a column, of course, if somehow it has already
// been added.
for (CollIndex bcolNo = 0; bcolNo < colArray.entries(); bcolNo++)
{
NAColumn *baseCol = colArray[bcolNo];
if (NOT allColumns.contains(baseCol))
{
// add this base column
allColumns.insert(baseCol);
}
} // end for
files_desc = table_desc->body.table_desc.files_desc;
isSystemTable = table_desc->body.table_desc.issystemtablecode;
// Record length of clustering key is the same as that of the base table record
indexes_desc->body.indexes_desc.record_length = table_desc->body.table_desc.record_length;
} // endif (isTheClusteringKey)
else
{
if (indexes_desc->body.indexes_desc.unique)
{
// As mentioned above, if this is a unique index,
// the last numClusteringKeyColumns are actually not
// part of the KEY of the index, they are just part of
// the index record. Since there are keys_desc entries
// for these columns, remove the corresponding entries
// from indexKeyColumns
// $$$$ Commenting this out, since Catman and DP2 handle index
// keys differently: they always assume that all index columns
// are part of the key. Somehow DP2 is told which prefix length
// of the key is actually the unique part.
// $$$$ This could be enabled when key lengths and record lengths
// are different.
// for (CollIndex i = 0; i < numClusteringKeyColumns; i++)
// indexKeyColumns.removeAt(indexKeyColumns.entries() - 1);
}
files_desc = indexes_desc->body.indexes_desc.files_desc;
isSystemTable = indexes_desc->body.indexes_desc.issystemtablecode;
} // endif (NOT isTheClusteringKey)
// -------------------------------------------------------------------
// Build the partition attributes for this table.
//
// Usually the partitioning key columns are the same as the
// clustering key columns. If no partitioning key columns have
// been specified then the partitioning key columns will be assumed
// to be the same as the clustering key columns. Otherwise, they
// may or may not be the same.
//
// We will ASSUME here that NonStop SQL/MP or the simulator will not
// put anything into partitioning keys desc and only SQL/MX will. So
// we don't have to deal with keytag columns here.
// -------------------------------------------------------------------
const desc_struct *partitioning_keys_desc =
indexes_desc->body.indexes_desc.partitioning_keys_desc;
// the key columns that build the salt column for HBase table
NAColumnArray hbaseSaltOnColumns(CmpCommon::statementHeap());
if (partitioning_keys_desc)
{
keys_desc = partitioning_keys_desc;
while (keys_desc)
{
Int32 tablecolnumber = keys_desc
->body.keys_desc.tablecolnumber;
indexColumn = colArray.getColumn(tablecolnumber);
partitioningKeyColumns.insert(indexColumn);
SortOrdering order = keys_desc
->body.keys_desc.ordering ?
DESCENDING : ASCENDING;
partitioningKeyColumns.setAscending
(partitioningKeyColumns.entries() - 1,
order == ASCENDING);
keys_desc = keys_desc->header.next;
} // end while (keys_desc)
}
else {
partitioningKeyColumns = indexKeyColumns;
// compute the partition key columns for HASH2 partitioning scheme
// for a salted HBase table. Later on, we will replace
// partitioningKeyColumns with the column list computed here if
// the desired partitioning schema is HASH2.
for (CollIndex i=0; i<hbaseSaltColumnList.entries(); i++ )
{
ColReference* cRef = (ColReference*)hbaseSaltColumnList[i];
const NAString& colName = (cRef->getColRefNameObj()).getColName();
NAColumn *col = allColumns.getColumn(colName.data()) ;
hbaseSaltOnColumns.insert(col);
}
}
// Create DP2 node map for partitioning function.
NodeMap* nodeMap = NULL;
//increment for each table/index to create unique identifier
cmpCurrentContext->incrementTableIdent();
// NB: Just in case, we made a call to setupClusterInfo at the
// beginning of this function.
desc_struct * partns_desc;
Int32 indexLevels = 1;
Int32 blockSize = indexes_desc->body.indexes_desc.blocksize;
if (files_desc)
{
if( (table->getSpecialType() != ExtendedQualName::VIRTUAL_TABLE AND
/*
table->getSpecialType() != ExtendedQualName::ISP_TABLE AND
*/
(NOT table->isHbaseTable()))
OR files_desc->body.files_desc.partns_desc )
{
nodeMap = new (heap) NodeMap(heap);
createNodeMap(files_desc->body.files_desc.partns_desc,
nodeMap,
heap,
table_desc->body.table_desc.tablename,
cmpCurrentContext->getTableIdent());
tableIdList.insert(CollIndex(cmpCurrentContext->getTableIdent()));
}
// Check whether the index has any remote partitions.
if (checkRemote(files_desc->body.files_desc.partns_desc,
indexes_desc->body.indexes_desc.indexname))
hasRemotePartition = TRUE;
else
hasRemotePartition = FALSE;
// Sol: 10-030703-7600. Earlier we assumed that the table is
// partitioned the same as the indexes, hence we used the table's
// partitioning to create the partitioning function. But this is not
// true. Hence we now use the index's partitioning function.
switch (indexes_desc->body.indexes_desc.partitioningScheme)
{
case COM_ROUND_ROBIN_PARTITIONING :
// Round Robin partitioned table
partFunc = createRoundRobinPartitioningFunction(
files_desc->body.files_desc.partns_desc,
nodeMap,
heap);
break;
case COM_HASH_V1_PARTITIONING :
// Hash partitioned table
partFunc = createHashDistPartitioningFunction(
files_desc->body.files_desc.partns_desc,
partitioningKeyColumns,
nodeMap,
heap);
break;
case COM_HASH_V2_PARTITIONING :
// Hash partitioned table
partFunc = createHash2PartitioningFunction(
files_desc->body.files_desc.partns_desc,
partitioningKeyColumns,
nodeMap,
heap);
partitioningKeyColumns = hbaseSaltOnColumns;
break;
case COM_UNSPECIFIED_PARTITIONING :
case COM_NO_PARTITIONING :
case COM_RANGE_PARTITIONING :
case COM_SYSTEM_PARTITIONING :
{
// If this is an MP Table, parse the first key
// values as MP Stored Text.
//
desc_struct* hbd =
((table_desc_struct*)table_desc)->hbase_regionkey_desc;
// splits will be 1 for single partitioned table.
Int32 splits = findDescEntries(hbd);
// Do Hash2 only if the table was salted originally
// and the current number of HBase regions is greater than 1.
if ( doHash2 )
doHash2 = (numOfSaltedPartitions > 0 && splits > 1);
if ( hbd )
if ( doHash2 ) {
partFunc = createHash2PartitioningFunctionForHBase(
((table_desc_struct*)table_desc)->hbase_regionkey_desc,
table,
numOfSaltedPartitions,
heap);
partitioningKeyColumns = hbaseSaltOnColumns;
}
else
partFunc = createRangePartitioningFunctionForHBase(
((table_desc_struct*)table_desc)->hbase_regionkey_desc,
table,
partitioningKeyColumns,
heap);
else {
// no region descriptor, range partitioned or single partition table
partFunc = createRangePartitioningFunction(
files_desc->body.files_desc.partns_desc,
partitioningKeyColumns,
nodeMap,
heap);
}
break;
}
case COM_UNKNOWN_PARTITIONING:
{
*CmpCommon::diags() << DgSqlCode(-4222)
<< DgString0("Unsupported partitioning");
return TRUE;
}
default:
CMPASSERT_STRING(FALSE, "Unhandled switch statement");
}
// Check to see if the partitioning function was created
// successfully. An error could occur if one of the
// partitioning keys is an unsupported type or if the table is
// an MP Table and the first key values contain MP syntax that
// is not supported by MX. The unsupported types are the
// FRACTION only SQL/MP Datetime types. An example of a
// syntax error is a Datetime literal which does not have the
// max number of digits in each field. (e.g. DATETIME
// '1999-2-4' YEAR TO DAY)
//
if (partFunc == NULL) {
return TRUE;
}
// currently we save the indexLevels in the fileset. Since there
// is a indexLevel for each file that belongs to the fileset,
// we get the biggest of this indexLevels and save in the fileset.
partns_desc = files_desc->body.files_desc.partns_desc;
if(partns_desc)
{
while (partns_desc)
{
if ( indexLevels < partns_desc->body.partns_desc.indexlevel)
indexLevels = partns_desc->body.partns_desc.indexlevel;
partns_desc = partns_desc->header.next;
}
}
}
// add a new access path
//
// $$$ The estimated number of records should be available from
// $$$ a FILES descriptor. If it is not available, it may have
// $$$ to be computed by examining the EOFs of each individual
// $$$ file that belongs to the file set.
// Create fully qualified ANSI name from indexname, the PHYSICAL name.
// If this descriptor was created for a sql/mp table, then the
// indexname is a fully qualified NSK name (\sys.$vol.subvol.name).
QualifiedName qualIndexName(indexes_desc->body.indexes_desc.indexname,
1, heap, bindWA);
// This ext_indexname is expected to be set up correctly as an
// EXTERNAL-format name (i.e., dquoted if any delimited identifiers)
// by sqlcat/read*.cpp. The ...AsAnsiString() is just-in-case (MP?).
NAString extIndexName(
indexes_desc->body.indexes_desc.ext_indexname ?
(NAString)indexes_desc->body.indexes_desc.ext_indexname :
qualIndexName.getQualifiedNameAsAnsiString(),
CmpCommon::statementHeap());
QualifiedName qualExtIndexName;
//if (indexes_desc->body.indexes_desc.isVolatile)
if (table->getSpecialType() != ExtendedQualName::VIRTUAL_TABLE)
qualExtIndexName = QualifiedName(extIndexName, 1, heap, bindWA);
else
qualExtIndexName = qualIndexName;
// for volatile tables, set the object part as the external name.
// cat/sch parts are internal and should not be shown.
if (indexes_desc->body.indexes_desc.isVolatile)
{
ComObjectName con(extIndexName);
extIndexName = con.getObjectNamePartAsAnsiString();
}
if (partFunc)
numberOfFiles = partFunc->getCountOfPartitions();
CMPASSERT(indexes_desc->body.indexes_desc.blocksize > 0);
NAList<HbaseCreateOption*>* hbaseCreateOptions = NULL;
if ((indexes_desc->body.indexes_desc.hbaseCreateOptions) &&
(CmpSeabaseDDL::genHbaseCreateOptions
(indexes_desc->body.indexes_desc.hbaseCreateOptions,
hbaseCreateOptions,
heap,
NULL,
NULL)))
return TRUE;
if (table->isHbaseTable())
{
indexLevels = hbtIndexLevels;
blockSize = hbtBlockSize;
}
newIndex = new (heap)
NAFileSet(
qualIndexName, // QN containing "\NSK.$VOL", FUNNYSV, FUNNYNM
//(indexes_desc->body.indexes_desc.isVolatile ?
qualExtIndexName, // :
//qualIndexName),
extIndexName, // string containing Ansi name CAT.SCH."indx"
files_desc ? files_desc->body.files_desc.fileorganization
: KEY_SEQUENCED_FILE,
isSystemTable,
numberOfFiles,
MAXOF(table_desc->body.table_desc.rowcount,0),
indexes_desc->body.indexes_desc.record_length,
blockSize,
indexLevels,
allColumns,
indexKeyColumns,
partitioningKeyColumns,
partFunc,
indexes_desc->body.indexes_desc.keytag,
uint32ArrayToInt64(
indexes_desc->body.indexes_desc.redeftime),
files_desc ? files_desc->body.files_desc.audit : 0,
files_desc ? files_desc->body.files_desc.auditcompress : 0,
files_desc ? files_desc->body.files_desc.compressed : 0,
files_desc ? (ComCompressionType)files_desc->body.files_desc.dcompressed : COM_NO_COMPRESSION,
files_desc ? files_desc->body.files_desc.icompressed : 0,
files_desc ? files_desc->body.files_desc.buffered: 0,
files_desc ? files_desc->body.files_desc.clearOnPurge: 0,
isPacked,
hasRemotePartition,
((indexes_desc->body.indexes_desc.keytag != 0) &&
(indexes_desc->body.indexes_desc.unique != 0)),
files_desc ? files_desc->body.files_desc.decoupledPartitionKeyList: 0,
files_desc ? files_desc->body.files_desc.fileCode : 0,
(indexes_desc->body.indexes_desc.isVolatile != 0),
(indexes_desc->body.indexes_desc.isInMemoryObjectDefn != 0),
indexes_desc->body.indexes_desc.indexUID,
indexes_desc->body.indexes_desc.keys_desc,
NULL, // no Hive stats
indexes_desc->body.indexes_desc.numSaltPartns,
hbaseCreateOptions,
heap);
if (isNotAvailable)
newIndex->setNotAvailable(TRUE);
newIndex->setRowFormat(indexes_desc->body.indexes_desc.rowFormat);
// Mark each NAColumn in the list
indexKeyColumns.setIndexKey();
if ((table->isHbaseTable()) && (indexes_desc->body.indexes_desc.keytag != 0))
saveNAColumns.setIndexKey();
if (indexes_desc->body.indexes_desc.isCreatedExplicitly)
newIndex->setIsCreatedExplicitly(TRUE);
//if index is unique and is on one column, then mark column as unique
if ((indexes_desc->body.indexes_desc.unique) &&
(indexKeyColumns.entries() == 1))
indexKeyColumns[0]->setIsUnique();
partitioningKeyColumns.setPartitioningKey();
// If it is a VP add it to the list of VPs.
// Otherwise, add it to the list of indices.
if (isVerticalPartition)
vertParts.insert(newIndex); // >>>> RETURN VALUE
else
{
indexes.insert(newIndex);
}
//
// advance to the next index
//
if (isTheClusteringKey)
{
clusteringIndex = newIndex; // >>>> RETURN VALUE
// switch to the alternate indexes by starting over again
isTheClusteringKey = FALSE;
indexes_desc = table_desc->body.table_desc.indexes_desc;
}
else
{
// simply advance to the next in the list
indexes_desc = indexes_desc->header.next;
}
// skip the clustering index, if we encounter it again
if (indexes_desc AND !indexes_desc->body.indexes_desc.keytag)
indexes_desc = indexes_desc->header.next;
} // end while (indexes_desc)
// logic related to indexes hiding
return FALSE;
} // static createNAFileSets()
#pragma warn(1506) // warning elimination
// for Hive tables
NABoolean createNAFileSets(hive_tbl_desc* hvt_desc /*IN*/,
const NATable * table /*IN*/,
const NAColumnArray & colArray /*IN*/,
NAFileSetList & indexes /*OUT*/,
NAFileSetList & vertParts /*OUT*/,
NAFileSet * & clusteringIndex /*OUT*/,
LIST(CollIndex) & tableIdList /*OUT*/,
NAMemory* heap,
BindWA * bindWA,
Int32 *maxIndexLevelsPtr = NULL)
{
NABoolean isTheClusteringKey = TRUE;
NABoolean isVerticalPartition;
NABoolean hasRemotePartition = FALSE;
CollIndex numClusteringKeyColumns = 0;
// Set up global cluster information. This global information always
// gets put on the context heap.
//
// Note that this function call will probably not do anything, since
// this cluster information is set up when arkcmp is created; however,
// it's certainly better to have this call here, rather than in a
// doubly-nested loop below where it used to be ...
// $$$ probably not necessary to call this even once ...
setUpClusterInfo(CmpCommon::contextHeap());
// only one set of key columns to handle for hive
Lng32 numberOfFiles = 1; // always at least 1
// NAColumn * indexColumn; // an index/VP key column
NAFileSet * newIndex; // a new file set
// all columns that belong to an index
NAColumnArray allColumns(CmpCommon::statementHeap());
// the index key columns - the SORT columns
NAColumnArray indexKeyColumns(CmpCommon::statementHeap());
// the partitioning key columns - the BUCKETING columns
NAColumnArray partitioningKeyColumns(CmpCommon::statementHeap());
PartitioningFunction * partFunc = NULL;
// is this an index or is it really a VP?
isVerticalPartition = FALSE;
NABoolean isPacked = FALSE;
NABoolean isNotAvailable = FALSE;
// ---------------------------------------------------------------------
// loop over the clustering key columns of the index
// ---------------------------------------------------------------------
const hive_bkey_desc *hbk_desc = hvt_desc->getBucketingKeys();
Int32 numBucketingColumns = 0;
while (hbk_desc)
{
NAString colName(hbk_desc->name_);
colName.toUpper();
NAColumn* bucketingColumn = colArray.getColumn(colName);
if ( bucketingColumn ) {
partitioningKeyColumns.insert(bucketingColumn);
numBucketingColumns++;
}
hbk_desc = hbk_desc->next_;
} // end while (hvk_desc)
const hive_skey_desc *hsk_desc = hvt_desc->getSortKeys();
if ( hsk_desc == NULL ) {
// assume all columns are index key columns
for (CollIndex i=0; i<colArray.entries(); i++ )
indexKeyColumns.insert(colArray[i]);
} else {
while (hsk_desc)
{
NAString colName(hsk_desc->name_);
colName.toUpper();
NAColumn* sortKeyColumn = colArray.getColumn(colName);
if ( sortKeyColumn ) {
indexKeyColumns.insert(sortKeyColumn);
indexKeyColumns.setAscending(indexKeyColumns.entries() - 1,
hsk_desc->orderInt_);
}
hsk_desc = hsk_desc->next_;
} // end while (hsk_desc)
}
// ---------------------------------------------------------------------
// Loop over the non key columns.
// ---------------------------------------------------------------------
for (CollIndex i=0; i<colArray.entries(); i++)
{
allColumns.insert(colArray[i]);
}
//increment for each table/index to create unique identifier
cmpCurrentContext->incrementTableIdent();
// collect file stats from HDFS for the table
const hive_sd_desc *sd_desc = hvt_desc->getSDs();
HHDFSTableStats * hiveHDFSTableStats = new(heap) HHDFSTableStats(heap);
hiveHDFSTableStats->
setPortOverride(CmpCommon::getDefaultLong(HIVE_LIB_HDFS_PORT_OVERRIDE));
// create file-level statistics and estimate total row count and record length
hiveHDFSTableStats->populate(hvt_desc);
if (hiveHDFSTableStats->hasError())
{
*CmpCommon::diags() << DgSqlCode(-1200)
<< DgString0(hiveHDFSTableStats->getDiags().getErrMsg())
<< DgTableName(table->getTableName().getQualifiedNameAsAnsiString());
return TRUE;
}
if ((hiveHDFSTableStats->isOrcFile()) &&
(CmpCommon::getDefault(TRAF_ENABLE_ORC_FORMAT) == DF_OFF))
{
*CmpCommon::diags() << DgSqlCode(-3069)
<< DgTableName(table->getTableName().getQualifiedNameAsAnsiString());
return TRUE;
}
#ifndef NDEBUG
NAString logFile =
ActiveSchemaDB()->getDefaults().getValue(HIVE_HDFS_STATS_LOG_FILE);
if (logFile.length())
{
FILE *ofd = fopen(logFile, "a");
if (ofd)
{
hiveHDFSTableStats->print(ofd);
fclose(ofd);
}
}
// for release code, would need to sandbox the ability to write
// files, e.g. to a fixed log directory
#endif
// Create a node map for partitioning function.
NodeMap* nodeMap = new (heap) NodeMap(heap, NodeMap::HIVE);
createNodeMap(hvt_desc,
nodeMap,
heap,
(char*)(table->getTableName().getObjectName().data()),
cmpCurrentContext->getTableIdent());
tableIdList.insert(CollIndex(cmpCurrentContext->getTableIdent()));
// For the time being, set it up as a Hash2 partitioned table
Int32 numBuckets = hvt_desc->getSDs()->buckets_;
if (numBuckets>1 && partitioningKeyColumns.entries()>0) {
if ( CmpCommon::getDefault(HIVE_USE_HASH2_AS_PARTFUNCION) == DF_ON )
partFunc = createHash2PartitioningFunction
(numBuckets, partitioningKeyColumns, nodeMap, heap);
else
partFunc = createHivePartitioningFunction
(numBuckets, partitioningKeyColumns, nodeMap, heap);
} else
partFunc = new (heap)
SinglePartitionPartitioningFunction(nodeMap, heap);
// NB: Just in case, we made a call to setupClusterInfo at the
// beginning of this function.
// desc_struct * partns_desc;
Int32 indexLevels = 1;
// add a new access path
//
// $$$ The estimated number of records should be available from
// $$$ a FILES descriptor. If it is not available, it may have
// $$$ to be computed by examining the EOFs of each individual
// $$$ file that belongs to the file set.
// Create fully qualified ANSI name from indexname, the PHYSICAL name.
// If this descriptor was created for a sql/mp table, then the
// indexname is a fully qualified NSK name (\sys.$vol.subvol.name).
QualifiedName qualIndexName(
(char*)(table->getTableName().getObjectName().data()),
"HIVE", "", heap);
// This ext_indexname is expected to be set up correctly as an
// EXTERNAL-format name (i.e., dquoted if any delimited identifiers)
// by sqlcat/read*.cpp. The ...AsAnsiString() is just-in-case (MP?).
NAString extIndexName(
qualIndexName.getQualifiedNameAsAnsiString(),
CmpCommon::statementHeap());
QualifiedName qualExtIndexName = QualifiedName(extIndexName, 1, heap, bindWA);
if (partFunc)
numberOfFiles = partFunc->getCountOfPartitions();
Int64 estimatedRC = 0;
Int64 estimatedRecordLength = 0;
if ( !sd_desc->isTrulyText() ) {
//
// Poor man's estimation: assume the record length in Hive is the
// same as SQ's. We can do better once we know how the binary data is
// stored in HDFS.
//
estimatedRecordLength = colArray.getTotalStorageSize();
estimatedRC = hiveHDFSTableStats->getTotalSize() / estimatedRecordLength;
} else {
// use the information estimated during populate() call
estimatedRC = hiveHDFSTableStats->getEstimatedRowCount();
estimatedRecordLength =
Lng32(MINOF(hiveHDFSTableStats->getEstimatedRecordLength(),
hiveHDFSTableStats->getEstimatedBlockSize()-100));
}
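  // Illustration (hypothetical numbers): for a non-text (binary) Hive table
  // whose files total 1 GB and whose columns add up to a 100-byte storage
  // size, estimatedRC comes out to roughly 10 million rows. For a text
  // table we instead use the row count sampled during populate(), and cap
  // the record length a little below the estimated HDFS block size.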
((NATable*)table)-> setOriginalRowCount((double)estimatedRC);
newIndex = new (heap)
NAFileSet(
qualIndexName, // QN containing "\NSK.$VOL", FUNNYSV, FUNNYNM
//(indexes_desc->body.indexes_desc.isVolatile ?
qualExtIndexName, // :
//qualIndexName),
extIndexName, // string containing Ansi name CAT.SCH."indx"
              // The real organization is a hybrid of KEY_SEQ and HASH.
// Well, we just take the KEY_SEQ for now.
KEY_SEQUENCED_FILE,
FALSE, // isSystemTable
numberOfFiles,
// HIVE-TBD
Cardinality(estimatedRC),
Lng32(estimatedRecordLength),
//hvt_desc->getBlockSize(),
(Lng32)hiveHDFSTableStats->getEstimatedBlockSize(),
indexLevels, // HIVE-TBD
allColumns,
indexKeyColumns,
partitioningKeyColumns,
partFunc,
0, // indexes_desc->body.indexes_desc.keytag,
hvt_desc->redeftime(),
1, // files_desc->body.files_desc.audit
0, // files_desc->body.files_desc.auditcompress
0, // files_desc->body.files_desc.compressed
COM_NO_COMPRESSION,
0, // files_desc->body.files_desc.icompressed
0, // files_desc->body.files_desc.buffered:
0, // files_desc->body.files_desc.clearOnPurge: 0,
isPacked,
hasRemotePartition,
0, // not a unique secondary index
0, // isDecoupledRangePartitioned
0, // file code
0, // not a volatile
0, // inMemObjectDefn
0,
NULL, // indexes_desc->body.indexes_desc.keys_desc,
hiveHDFSTableStats,
0, // saltPartns
NULL, //hbaseCreateOptions
heap);
if (isNotAvailable)
newIndex->setNotAvailable(TRUE);
// Mark each NAColumn in the list
indexKeyColumns.setIndexKey();
partitioningKeyColumns.setPartitioningKey();
// If it is a VP add it to the list of VPs.
// Otherwise, add it to the list of indices.
indexes.insert(newIndex);
clusteringIndex = newIndex;
return FALSE;
} // static createNAFileSets()
#pragma warn(1506) // warning elimination
// -----------------------------------------------------------------------
// Mark columns named in PRIMARY KEY constraint (these will be different
// from clustering key columns when the PK is droppable), for Binder error 4033.
// -----------------------------------------------------------------------
static void markPKCols(const constrnts_desc_struct * constrnt /*IN*/,
const NAColumnArray& columnArray /*IN*/)
{
desc_struct *keycols_desc = constrnt->constr_key_cols_desc;
while (keycols_desc)
{
constrnt_key_cols_desc_struct *key =
&keycols_desc->body.constrnt_key_cols_desc;
// Lookup by name (not position: key->position is pos *within the PK*)
NAColumn *nacol = columnArray.getColumn(key->colname);
if(nacol != NULL)
nacol->setPrimaryKey();
keycols_desc = keycols_desc->header.next;
}
} // static markPKCols
// -----------------------------------------------------------------------
// Insert MP CHECK CONSTRAINT text into NATable::checkConstraints_.
// -----------------------------------------------------------------------
static NABoolean
createConstraintInfo(const desc_struct * table_desc /*IN*/,
const QualifiedName& tableQualName /*IN*/,
const NAColumnArray& columnArray /*IN*/,
CheckConstraintList& checkConstraints /*OUT*/,
AbstractRIConstraintList& uniqueConstraints,
AbstractRIConstraintList& refConstraints,
NAMemory* heap,
BindWA *bindWA)
{
desc_struct *constrnts_desc = table_desc->body.table_desc.constrnts_desc;
while (constrnts_desc)
{
constrnts_desc_struct *constrntHdr = &constrnts_desc->body.constrnts_desc;
Int32 minNameParts=3;
QualifiedName constrntName(constrntHdr->constrntname, minNameParts, (NAMemory*)0, bindWA);
if (constrntName.numberExpanded() == 0) {
// There was an error parsing the name of the constraint (see
// QualifiedName ctor). Return TRUE indicating an error.
//
return TRUE;
}
switch (constrntHdr->type)
{
case PRIMARY_KEY_CONSTRAINT:
markPKCols(constrntHdr, columnArray);
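      // fall through: a primary key is also recorded as a unique constraint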
case UNIQUE_CONSTRAINT: {
UniqueConstraint *uniqueConstraint = new (heap)
UniqueConstraint(constrntName, tableQualName, heap,
(constrntHdr->type == PRIMARY_KEY_CONSTRAINT));
uniqueConstraint->setKeyColumns(constrntHdr, heap);
uniqueConstraint->setRefConstraintsReferencingMe(constrntHdr, heap, bindWA);
uniqueConstraints.insert(uniqueConstraint);
}
break;
case REF_CONSTRAINT:
{
char *refConstrntName = constrntHdr->referenced_constrnts_desc->
body.ref_constrnts_desc.constrntname;
char *refTableName = constrntHdr->referenced_constrnts_desc->
body.ref_constrnts_desc.tablename;
QualifiedName refConstrnt(refConstrntName, 3, (NAMemory*)0, bindWA);
QualifiedName refTable(refTableName, 3, (NAMemory*)0, bindWA);
RefConstraint *refConstraint = new (heap)
RefConstraint(constrntName, tableQualName,
refConstrnt, refTable, heap);
refConstraint->setKeyColumns(constrntHdr, heap);
refConstraint->setIsEnforced((constrntHdr->isEnforced == 1));
refConstraints.insert(refConstraint);
}
break;
case CHECK_CONSTRAINT:
case MP_CHECK_CONSTRAINT:
{
char *constrntText = constrntHdr->check_constrnts_desc->
body.check_constrnts_desc.constrnt_text;
checkConstraints.insert(new (heap)
CheckConstraint(constrntName, constrntText, heap));
}
break;
default:
CMPASSERT(FALSE);
}
constrnts_desc = constrnts_desc->header.next;
}
// return FALSE, indicating no error.
//
return FALSE;
} // static createConstraintInfo()
ULng32 hashColPosList(const CollIndexSet &colSet)
{
return colSet.hash();
}
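// Hash function for sets of column positions; presumably used as the hash
// for the colsWithMissingStats_ dictionary keyed by CollIndexSet (see
// doesMissingStatsWarningExist()/insertMissingStatsWarning() below).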
// ----------------------------------------------------------------------------
// method: lookupObjectUidByName
//
// Calls DDL manager to get the object UID for the specified object
//
// params:
// qualName - name of object to lookup
// objectType - type of object
// reportError - whether to set diags area when not found
//
// returns:
// -1 -> error found trying to read metadata including object not found
// UID of found object
//
// the diags area contains details of any error detected
//
// ----------------------------------------------------------------------------
static Int64 lookupObjectUidByName( const QualifiedName& qualName
, ComObjectType objectType
, NABoolean reportError
)
{
ExeCliInterface cliInterface(STMTHEAP);
Int64 objectUID = 0;
CmpSeabaseDDL cmpSBD(STMTHEAP);
if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
*CmpCommon::diags() << DgSqlCode( -4400 );
return -1;
}
objectUID = cmpSBD.getObjectUID(&cliInterface,
qualName.getCatalogName().data(),
qualName.getSchemaName().data(),
qualName.getObjectName().data(),
comObjectTypeLit(objectType),
NULL,
NULL,
FALSE,
reportError);
cmpSBD.switchBackCompiler();
return objectUID;
}
NABoolean NATable::fetchObjectUIDForNativeTable(const CorrName& corrName)
{
NAString adjustedName = ComConvertNativeNameToTrafName
(corrName.getQualifiedNameObj().getCatalogName(),
corrName.getQualifiedNameObj().getUnqualifiedSchemaNameAsAnsiString(),
corrName.getQualifiedNameObj().getUnqualifiedObjectNameAsAnsiString());
QualifiedName extObjName (adjustedName, 3, STMTHEAP);
objectUID_ = lookupObjectUidByName(extObjName, COM_BASE_TABLE_OBJECT, FALSE);
  // If the objectUID is not found, then the table is not externally defined
  // in Trafodion; set the objectUID to 0.
  // If an unexpected error occurs, return with the error.
if (objectUID_ <= 0)
{
if (CmpCommon::diags()->mainSQLCODE() < 0)
return FALSE;
else
objectUID_ = 0;
}
return TRUE;
}
// -----------------------------------------------------------------------
// NATable::NATable() constructor
// -----------------------------------------------------------------------
const Lng32 initHeapSize = 32 * 1024; // ## 32K: tune this someday!
#pragma nowarn(770) // warning elimination
NATable::NATable(BindWA *bindWA,
const CorrName& corrName,
NAMemory *heap,
desc_struct* inTableDesc)
//
// The NATable heap ( i.e. heap_ ) used to come from ContextHeap
// (i.e. heap), but that caused high memory usage/leakage in the Context
// Heap. Although the NATables are deleted at the end of each statement,
// the heap_ is returned to heap (i.e. the context heap), which left the
// context heap holding many unused chunks of memory. So it is
// changed to be whatever heap is passed in at the call in
// NATableDB.getNATable.
//
// Now NATable objects can be cached. If an object is to be cached (persisted
// across statements), a NATable heap is allocated for the object
// and is passed in (this is done in NATableDB::get(CorrName& corrName...)).
// Otherwise a reference to the statement heap is passed in. When a cached
// object is to be deleted, the object's heap is deleted, which wipes out the
// NATable object and all its related stuff. NATable objects that are not cached
// are wiped out at the end of the statement when the statement heap is deleted.
//
: heap_(heap),
referenceCount_(0),
refsIncompatibleDP2Halloween_(FALSE),
isHalloweenTable_(FALSE),
qualifiedName_(corrName.getExtendedQualNameObj(),heap),
synonymReferenceName_(heap),
fileSetName_(corrName.getQualifiedNameObj(),heap), // for now, set equal
clusteringIndex_(NULL),
colcount_(0),
colArray_(heap),
recordLength_(0),
indexes_(heap),
vertParts_(heap),
colStats_(NULL),
statsFetched_(FALSE),
viewFileName_(NULL),
viewText_(NULL),
viewTextInNAWchars_(heap),
viewTextCharSet_(CharInfo::UnknownCharSet),
viewCheck_(NULL),
flags_(IS_INSERTABLE | IS_UPDATABLE),
insertMode_(COM_REGULAR_TABLE_INSERT_MODE),
isSynonymTranslationDone_(FALSE),
checkConstraints_(heap),
createTime_(0),
redefTime_(0),
cacheTime_(0),
statsTime_(0),
catalogUID_(0),
schemaUID_(0),
objectUID_(0),
objectType_(COM_UNKNOWN_OBJECT),
partitioningScheme_(COM_UNKNOWN_PARTITIONING),
uniqueConstraints_(heap),
refConstraints_(heap),
isAnMV_(FALSE),
isAnMVMetaData_(FALSE),
mvsUsingMe_(heap),
mvInfo_(NULL),
accessedInCurrentStatement_(TRUE),
setupForStatement_(FALSE),
resetAfterStatement_(FALSE),
hitCount_(0),
replacementCounter_(2),
sizeInCache_(0),
recentlyUsed_(TRUE),
tableConstructionHadWarnings_(FALSE),
isAnMPTableWithAnsiName_(FALSE),
isUMDTable_(FALSE),
isSMDTable_(FALSE),
isMVUMDTable_(FALSE),
// For virtual tables, we set the object schema version
// to be the current schema version
osv_(COM_VERS_CURR_SCHEMA),
ofv_(COM_VERS_CURR_SCHEMA),
partnsDesc_(NULL),
colsWithMissingStats_(NULL),
originalCardinality_(-1.0),
tableIdList_(heap),
rcb_(NULL),
rcbLen_(0),
keyLength_(0),
parentTableName_(NULL),
sgAttributes_(NULL),
isHive_(FALSE),
isHbase_(FALSE),
isHbaseCell_(FALSE),
isHbaseRow_(FALSE),
isSeabase_(FALSE),
isSeabaseMD_(FALSE),
isSeabasePrivSchemaTable_(FALSE),
isUserUpdatableSeabaseMD_(FALSE),
resetHDFSStatsAfterStmt_(FALSE),
hiveDefaultStringLen_(0),
hiveTableId_(-1),
tableDesc_(inTableDesc),
privInfo_(NULL),
secKeySet_(heap),
newColumns_(heap),
snapshotName_(NULL),
prototype_(NULL)
{
NAString tblName = qualifiedName_.getQualifiedNameObj().getQualifiedNameAsString();
NAString mmPhase;
Lng32 preCreateNATableWarnings = CmpCommon::diags()->getNumber(DgSqlCode::WARNING_);
//set heap type
if(heap_ == CmpCommon::statementHeap()){
heapType_ = STATEMENT;
mmPhase = "NATable Init (Stmt) - " + tblName;
}else if (heap_ == CmpCommon::contextHeap()){
heapType_ = CONTEXT;
mmPhase = "NATable Init (Cnxt) - " + tblName;
}else {
heapType_ = OTHER;
mmPhase = "NATable Init (Other) - " + tblName;
}
MonitorMemoryUsage_Enter((char*)mmPhase.data(), heap_, TRUE);
// Do a readTableDef, if table descriptor has not been passed in
//
desc_struct * table_desc;
Int32 *maxIndexLevelsPtr = new (STMTHEAP) Int32;
if (!inTableDesc)
{
// lookup from metadata other than HBase is not currently supported
CMPASSERT(inTableDesc);
}
else
{
// use the input descriptor to create NATable.
// Used if 'virtual' tables, like EXPLAIN,
// DESCRIBE, RESOURCE_FORK, etc are to be created.
table_desc = inTableDesc;
// Need to initialize the maxIndexLevelsPtr field
*maxIndexLevelsPtr = 1;
}
if ((corrName.isHbase()) || (corrName.isSeabase()))
{
setIsHbaseTable(TRUE);
setIsSeabaseTable(corrName.isSeabase());
setIsHbaseCellTable(corrName.isHbaseCell());
setIsHbaseRowTable(corrName.isHbaseRow());
setIsSeabaseMDTable(corrName.isSeabaseMD());
}
// Check if the synonym name translation to reference object has been done.
if (table_desc->body.table_desc.isSynonymNameTranslationDone)
{
isSynonymTranslationDone_ = TRUE;
NAString synonymReferenceName(table_desc->body.table_desc.tablename);
synonymReferenceName_ = synonymReferenceName;
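      // The descriptor stores the referenced object's UID as two 32-bit
      // words; recombine them (high word shifted up by 2^32, i.e. multiplied
      // by 0x100000000) into a single 64-bit ComUID.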
ComUID uid(table_desc->body.table_desc.objectUID[0]*0x100000000LL +
table_desc->body.table_desc.objectUID[1]);
synonymReferenceObjectUid_ = uid;
}
  // Check if it is a UMD table, an SMD table, or an MV-related UMD object,
  // and set all corresponding flags to indicate this.
if (table_desc->body.table_desc.isUMDTable)
{
isUMDTable_ = TRUE;
}
if (table_desc->body.table_desc.issystemtablecode)
{
isSMDTable_ = TRUE;
}
if (table_desc->body.table_desc.isMVMetaDataObject)
{
isMVUMDTable_ = TRUE;
}
isTrigTempTable_ = (qualifiedName_.getSpecialType() == ExtendedQualName::TRIGTEMP_TABLE);
switch(table_desc->body.table_desc.rowFormat)
{
case COM_PACKED_FORMAT_TYPE:
setSQLMXTable(TRUE);
break;
case COM_ALIGNED_FORMAT_TYPE:
setSQLMXAlignedTable(TRUE);
break;
case COM_HBASE_FORMAT_TYPE:
case COM_UNKNOWN_FORMAT_TYPE:
break;
}
if (table_desc->body.table_desc.isVolatile)
{
setVolatileTable( TRUE );
}
if (table_desc->body.table_desc.isInMemoryObjectDefn)
{
setInMemoryObjectDefn( TRUE );
}
if (table_desc->body.table_desc.isDroppable)
{
setDroppableTable( TRUE );
}
if (corrName.isExternal())
{
setIsExternalTable(TRUE);
}
if (qualifiedName_.getQualifiedNameObj().isHistograms() ||
qualifiedName_.getQualifiedNameObj().isHistogramIntervals())
{
setIsHistogramTable(TRUE);
}
insertMode_ = table_desc->body.table_desc.insertMode;
setRecordLength(table_desc->body.table_desc.record_length);
//
// Add timestamp information.
//
createTime_ = uint32ArrayToInt64(table_desc->body.table_desc.createtime);
redefTime_ = uint32ArrayToInt64(table_desc->body.table_desc.redeftime);
cacheTime_ = uint32ArrayToInt64(table_desc->body.table_desc.cachetime);
catalogUID_ = uint32ArrayToInt64(table_desc->body.table_desc.catUID);
schemaUID_ = uint32ArrayToInt64(table_desc->body.table_desc.schemaUID);
objectUID_ = uint32ArrayToInt64(table_desc->body.table_desc.objectUID);
  // Set the objectUID_ for HBase Cell and Row tables: if the table has
  // been defined in Trafodion, use that value; otherwise, set it to 0.
if (isHbaseCell_ || isHbaseRow_)
{
if ( !fetchObjectUIDForNativeTable(corrName) )
return;
if (objectUID_ > 0 )
setHasExternalTable(TRUE);
}
if (table_desc->body.table_desc.owner)
{
Int32 userInfo (table_desc->body.table_desc.owner);
owner_ = userInfo;
}
if (table_desc->body.table_desc.schemaOwner)
{
Int32 schemaUser(table_desc->body.table_desc.schemaOwner);
schemaOwner_ = schemaUser;
}
objectType_ = table_desc->body.table_desc.objectType;
partitioningScheme_ = table_desc->body.table_desc.partitioningScheme;
// Set up privs
if ((corrName.getSpecialType() == ExtendedQualName::SG_TABLE) ||
(!(corrName.isSeabaseMD() || corrName.isSpecialTable())))
setupPrivInfo();
if ((table_desc->body.table_desc.tableFlags & SEABASE_OBJECT_IS_EXTERNAL_HIVE) != 0 ||
(table_desc->body.table_desc.tableFlags & SEABASE_OBJECT_IS_EXTERNAL_HBASE) != 0)
setIsExternalTable(TRUE);
rcb_ = table_desc->body.table_desc.rcb;
rcbLen_ = table_desc->body.table_desc.rcbLen;
keyLength_ = table_desc->body.table_desc.keyLen;
if (table_desc->body.table_desc.parentTableName)
{
parentTableName_ =
new(heap_) char[strlen(table_desc->body.table_desc.parentTableName) + 1];
strcpy(parentTableName_, table_desc->body.table_desc.parentTableName);
}
if (table_desc->body.table_desc.snapshotName)
{
snapshotName_ =
new(heap_) char[strlen(table_desc->body.table_desc.snapshotName) + 1];
strcpy(snapshotName_, table_desc->body.table_desc.snapshotName);
}
if (table_desc->body.table_desc.default_col_fam)
defaultColFam_ = table_desc->body.table_desc.default_col_fam;
if (table_desc->body.table_desc.all_col_fams)
{
// Space delimited col families.
string buf; // Have a buffer string
stringstream ss(table_desc->body.table_desc.all_col_fams); // Insert the string into a stream
while (ss >> buf)
{
allColFams_.insert(buf.c_str());
}
}
else
allColFams_.insert(defaultColFam_);
desc_struct * files_desc = table_desc->body.table_desc.files_desc;
  // Some objects don't have a files_desc set up (e.g. views).
  // Therefore, only set up the partnsDesc_ if this is a partitionable object.
if (files_desc)
{
if (files_desc->body.files_desc.partns_desc)
partnsDesc_ = files_desc->body.files_desc.partns_desc;
}
else
partnsDesc_ = NULL;
//
// Insert a NAColumn in the colArray_ for this NATable for each
// columns_desc from the ARK SMD. Returns TRUE if error creating NAColumns.
//
if (createNAColumns(table_desc->body.table_desc.columns_desc,
this,
colArray_ /*OUT*/,
heap_))
//coverity[leaked_storage]
return; // colcount_ == 0 indicates an error
//
// Add view information, if this is a view
//
desc_struct *view_desc = table_desc->body.table_desc.views_desc;
if (view_desc)
{
viewText_ = new (heap_) char[strlen(view_desc->body.view_desc.viewtext) + 2];
strcpy(viewText_, view_desc->body.view_desc.viewtext);
strcat(viewText_, ";");
viewTextCharSet_ = (CharInfo::CharSet)view_desc->body.view_desc.viewtextcharset;
viewCheck_ = NULL; //initialize
if(view_desc->body.view_desc.viewchecktext){
UInt32 viewCheckLength = str_len(view_desc->body.view_desc.viewchecktext)+1;
viewCheck_ = new (heap_) char[ viewCheckLength];
memcpy(viewCheck_, view_desc->body.view_desc.viewchecktext,
viewCheckLength);
}
setUpdatable(view_desc->body.view_desc.updatable);
setInsertable(view_desc->body.view_desc.insertable);
//
// The updatable flag is false for an MP view only if it is NOT a
// protection view. Therefore updatable == FALSE iff it is a
// shorthand view. See ReadTableDef.cpp, l. 3379.
//
viewFileName_ = NULL;
CMPASSERT(view_desc->body.view_desc.viewfilename);
UInt32 viewFileNameLength = str_len(view_desc->body.view_desc.viewfilename) + 1;
viewFileName_ = new (heap_) char[viewFileNameLength];
memcpy(viewFileName_, view_desc->body.view_desc.viewfilename,
viewFileNameLength);
}
else
{
//keep track of memory used by NAFileSets
Lng32 preCreateNAFileSetsMemSize = heap_->getAllocSize();
//
// Process indexes and vertical partitions for this table.
//
if (createNAFileSets(table_desc /*IN*/,
this /*IN*/,
colArray_ /*IN*/,
indexes_ /*OUT*/,
vertParts_ /*OUT*/,
clusteringIndex_ /*OUT*/,
tableIdList_ /*OUT*/,
heap_,
bindWA,
newColumns_, /*OUT*/
maxIndexLevelsPtr)) {
return; // colcount_ == 0 indicates an error
}
// Add constraint info.
//
// This call to createConstraintInfo, calls the parser on
// the constraint name
//
NABoolean errorOccurred =
createConstraintInfo(table_desc /*IN*/,
getTableName() /*IN*/,
getNAColumnArray()/*IN (some columns updated)*/,
checkConstraints_ /*OUT*/,
uniqueConstraints_/*OUT*/,
refConstraints_ /*OUT*/,
heap_,
bindWA);
if (errorOccurred) {
// return before setting colcount_, indicating that there
// was an error in constructing this NATable.
//
return;
}
//
// FetchHistograms call used to be here -- moved to getStatistics().
//
}
// change partFunc for base table if PARTITION clause has been used
// to limit the number of partitions that will be accessed.
if ((qualifiedName_.isPartitionNameSpecified()) ||
(qualifiedName_.isPartitionRangeSpecified())) {
if (filterUnusedPartitions(corrName.getPartnClause())) {
return ;
}
}
//
// Set colcount_ after all possible errors (Binder uses nonzero colcount
// as an indicator of valid table definition).
//
CMPASSERT(table_desc->body.table_desc.colcount >= 0); // CollIndex cast ok?
colcount_ = (CollIndex)table_desc->body.table_desc.colcount;
// If there is a host variable associated with this table, store it
// for use by the generator to generate late-name resolution information.
//
HostVar *hv = corrName.getPrototype();
prototype_ = hv ? new (heap_) HostVar(*hv) : NULL;
// MV
// Initialize the MV support data members
isAnMV_ = table_desc->body.table_desc.isMVtable;
isAnMVMetaData_ = table_desc->body.table_desc.isMVMetaDataObject;
mvAttributeBitmap_.initBitmap(table_desc->body.table_desc.mvAttributesBitmap);
desc_struct *mvs_desc = table_desc->body.table_desc.using_mvs_desc;
// Memory Leak
while (mvs_desc)
{
using_mv_desc_struct *mv = &mvs_desc->body.using_mv_desc;
UsingMvInfo *usingMv = new(heap_)
UsingMvInfo(mv->mvName, mv->refreshType, mv->rewriteEnabled,
mv->isInitialized, heap_);
mvsUsingMe_.insert(usingMv);
mvs_desc = mvs_desc->header.next;
}
// ++MV
  // fix the special-type for MV objects. There are cases where the type is
  // set to NORMAL_TABLE although this is an MV.
//
// Example:
// --------
// in the statement "select * from MV1" mv1 will have a NORMAL_TABLE
// special-type, while in "select * from table(mv_table MV1)" it will
// have the MV_TABLE special-type.
if (isAnMV_)
{
switch(qualifiedName_.getSpecialType())
{
case ExtendedQualName::GHOST_TABLE:
qualifiedName_.setSpecialType(ExtendedQualName::GHOST_MV_TABLE);
break;
case ExtendedQualName::GHOST_MV_TABLE:
// Do not change it
break;
default:
qualifiedName_.setSpecialType(ExtendedQualName::MV_TABLE);
break;
}
}
// --MV
// Initialize the sequence generator fields
desc_struct *sequence_desc = table_desc->body.table_desc.sequence_generator_desc;
if (sequence_desc != NULL) {
sequence_generator_desc_struct *sg_desc = &sequence_desc->body.sequence_generator_desc;
if (sg_desc != NULL)
{
sgAttributes_ =
new(heap_) SequenceGeneratorAttributes(
sg_desc->startValue,
sg_desc->increment,
sg_desc->maxValue,
sg_desc->minValue,
sg_desc->sgType,
sg_desc->sqlDataType,
sg_desc->fsDataType,
sg_desc->cycleOption,
FALSE,
sg_desc->objectUID,
sg_desc->cache,
sg_desc->nextValue,
0,
sg_desc->redefTime);
}
}
#ifndef NDEBUG
if (getenv("NATABLE_DEBUG"))
{
cout << "NATable " << (void*)this << " "
<< qualifiedName_.getQualifiedNameObj().getQualifiedNameAsAnsiString() << " "
<< (Int32)qualifiedName_.getSpecialType() << endl;
colArray_.print();
}
#endif
//this guy is cacheable
if((qualifiedName_.isCacheable())&&
(NOT (isHbaseTable())) &&
//this object is not on the statement heap (i.e. it is being cached)
((heap_ != CmpCommon::statementHeap())||
(OSIM_runningInCaptureMode())))
{
char * nodeName = NULL;
char * catStr = NULL;
char * schemaStr = NULL;
char * fileStr = NULL;
short nodeNameLen = 0;
Int32 catStrLen = 0;
Int32 schemaStrLen = 0;
Int32 fileStrLen = 0;
#ifdef NA_64BIT
// dg64 - match signature
int_32 primaryNodeNum=0;
#else
Int32 primaryNodeNum=0;
#endif
short error = 0;
//clusteringIndex has physical filename that can be used to check
//if a catalog operation has been performed on a table.
//Views don't have clusteringIndex, so we get physical filename
//from the viewFileName_ datamember.
if(viewText_)
{
//view filename starts with node name
//filename is in format \<node_name>.$<volume>.<subvolume>.<file>
//catStr => <volume>
//schemaStr => <subvolume>
//fileStr => <file>
nodeName = viewFileName_;
catStr = nodeName;
       //skip over the node name (i.e. \MAYA, \AZTEC, etc.) while measuring
       //its length, to get to the beginning of the volume name
while((nodeName[nodeNameLen]!='.')&&
(nodeNameLen < 8)){
catStr++;
nodeNameLen++;
};
//skip over '.' and the '$' in volume name
catStr=&nodeName[nodeNameLen+2];
schemaStr=catStr;
//skip over the volume/catalog name
//while measuring catalog name length
while((catStr[catStrLen]!='.')&&
(catStrLen < 8))
{
schemaStr++;
catStrLen++;
}
//skip over the '.'
schemaStr++;
fileStr=schemaStr;
//skip over the subvolume/schema name
//while measuring schema name length
while((schemaStr[schemaStrLen]!='.')&&
(schemaStrLen < 8))
{
fileStr++;
schemaStrLen++;
}
//skip over the '.'
fileStr++;
fileStrLen = str_len(fileStr);
//figure out the node number for the node
//which has the primary partition.
primaryNodeNum=0;
if(!OSIM_runningSimulation())
primaryNodeNum = gpClusterInfo->mapNodeNameToNodeNum(NAString(nodeName));
}
else{
//get qualified name of the clustering index which should
//be the actual physical file name of the table
const QualifiedName fileNameObj = getClusteringIndex()->
getRandomPartition();
const NAString fileName = fileNameObj.getObjectName();
//get schemaName object
const SchemaName schemaNameObj = fileNameObj.getSchemaName();
const NAString schemaName = schemaNameObj.getSchemaName();
//get catalogName object
//this contains a string in the form \<node_name>.$volume
const CatalogName catalogNameObj = fileNameObj.getCatalogName();
const NAString catalogName = catalogNameObj.getCatalogName();
nodeName = (char*) catalogName.data();
catStr = nodeName;
//Measure length of node name
//skip over node name i.e. \MAYA, \AZTEC, etc
//and get to volume name
while((nodeName[nodeNameLen]!='.')&&
(nodeNameLen < 8)){
catStr++;
nodeNameLen++;
};
//get volume/catalog name
//skip ".$"
catStr=&nodeName[nodeNameLen+2];
#pragma nowarn(1506) // warning elimination
catStrLen = catalogName.length() - (nodeNameLen+2);
#pragma warn(1506) // warning elimination
//get subvolume/schema name
schemaStr = (char *) schemaName.data();
#pragma nowarn(1506) // warning elimination
schemaStrLen = schemaName.length();
#pragma warn(1506) // warning elimination
//get file name
fileStr = (char *) fileName.data();
#pragma nowarn(1506) // warning elimination
fileStrLen = fileName.length();
#pragma warn(1506) // warning elimination
//figure out the node number for the node
//which has the primary partition.
primaryNodeNum=0;
primaryNodeNum = gpClusterInfo->mapNodeNameToNodeNum(NAString(nodeName));
}
}
Lng32 postCreateNATableWarnings = CmpCommon::diags()->getNumber(DgSqlCode::WARNING_);
if(postCreateNATableWarnings != preCreateNATableWarnings)
tableConstructionHadWarnings_=TRUE;
const char *lobHdfsServer = CmpCommon::getDefaultString(LOB_HDFS_SERVER);
Int32 lobHdfsPort = (Lng32)CmpCommon::getDefaultNumeric(LOB_HDFS_PORT);
if (hasLobColumn())
{
// read lob related information from lob metadata
short *lobNumList = new (heap_) short[getColumnCount()];
short *lobTypList = new (heap_) short[getColumnCount()];
char **lobLocList = new (heap_) char*[getColumnCount()];
const NAColumnArray &colArray = getNAColumnArray();
NAColumn *nac = NULL;
Lng32 j = 0;
for (CollIndex i = 0; i < getColumnCount(); i++)
{
nac = colArray.getColumn(i);
if (nac->getType()->getTypeQualifier() == NA_LOB_TYPE)
{
lobLocList[j] = new (heap_) char[1024];
j++;
}
}
NAString schNam;
schNam = "\"";
schNam += getTableName().getCatalogName();
schNam += "\".\"";
schNam += getTableName().getSchemaName();
schNam += "\"";
Lng32 numLobs = 0;
Lng32 cliRC = SQL_EXEC_LOBddlInterface
(
(char*)schNam.data(),
schNam.length(),
objectUid().castToInt64(),
numLobs,
LOB_CLI_SELECT_CURSOR,
lobNumList,
lobTypList,
lobLocList,(char *)lobHdfsServer,lobHdfsPort,0,FALSE);
if (cliRC == 0)
{
for (Lng32 i = 0; i < numLobs; i++)
{
nac = colArray.getColumn(lobNumList[i]);
nac->lobNum() = lobNumList[i];
nac->lobStorageType() = (LobsStorage)lobTypList[i];
nac->lobStorageLocation() = lobLocList[i];
}
} // if
} // if
// LCOV_EXCL_STOP
initialSize_ = heap_->getAllocSize();
MonitorMemoryUsage_Exit((char*)mmPhase.data(), heap_, NULL, TRUE);
} // NATable()
#pragma warn(770) // warning elimination
// Constructor for a Hive table
NATable::NATable(BindWA *bindWA,
const CorrName& corrName,
NAMemory *heap,
struct hive_tbl_desc* htbl)
//
// The NATable heap ( i.e. heap_ ) used to come from ContextHeap
// (i.e. heap), but that caused high memory usage/leakage in the Context
// Heap. Although the NATables are deleted at the end of each statement,
// the heap_ is returned to heap (i.e. the context heap), which left the
// context heap holding many unused chunks of memory. So it is
// changed to be whatever heap is passed in at the call in
// NATableDB.getNATable.
//
// Now NATable objects can be cached. If an object is to be cached (persisted
// across statements), a NATable heap is allocated for the object
// and is passed in (this is done in NATableDB::get(CorrName& corrName...)).
// Otherwise a reference to the statement heap is passed in. When a cached
// object is to be deleted, the object's heap is deleted, which wipes out the
// NATable object and all its related stuff. NATable objects that are not cached
// are wiped out at the end of the statement when the statement heap is deleted.
//
: heap_(heap),
referenceCount_(0),
refsIncompatibleDP2Halloween_(FALSE),
isHalloweenTable_(FALSE),
qualifiedName_(corrName.getExtendedQualNameObj(),heap),
synonymReferenceName_(heap),
fileSetName_(corrName.getQualifiedNameObj(),heap), // for now, set equal
clusteringIndex_(NULL),
colcount_(0),
colArray_(heap),
recordLength_(0),
indexes_(heap),
vertParts_(heap),
colStats_(NULL),
statsFetched_(FALSE),
viewFileName_(NULL),
viewText_(NULL),
viewTextInNAWchars_(heap),
viewTextCharSet_(CharInfo::UnknownCharSet),
viewCheck_(NULL),
flags_(IS_INSERTABLE | IS_UPDATABLE),
insertMode_(COM_REGULAR_TABLE_INSERT_MODE),
isSynonymTranslationDone_(FALSE),
checkConstraints_(heap),
createTime_(htbl->creationTS_),
redefTime_(htbl->redeftime()),
cacheTime_(0),
statsTime_(0),
catalogUID_(0),
schemaUID_(0),
objectUID_(0),
objectType_(COM_UNKNOWN_OBJECT),
partitioningScheme_(COM_UNKNOWN_PARTITIONING),
uniqueConstraints_(heap),
refConstraints_(heap),
isAnMV_(FALSE),
isAnMVMetaData_(FALSE),
mvsUsingMe_(heap),
mvInfo_(NULL),
accessedInCurrentStatement_(TRUE),
setupForStatement_(FALSE),
resetAfterStatement_(FALSE),
hitCount_(0),
replacementCounter_(2),
sizeInCache_(0),
recentlyUsed_(TRUE),
tableConstructionHadWarnings_(FALSE),
isAnMPTableWithAnsiName_(FALSE),
isUMDTable_(FALSE),
isSMDTable_(FALSE),
isMVUMDTable_(FALSE),
// For virtual tables, we set the object schema version
// to be the current schema version
osv_(COM_VERS_CURR_SCHEMA),
ofv_(COM_VERS_CURR_SCHEMA),
partnsDesc_(NULL),
colsWithMissingStats_(NULL),
originalCardinality_(-1.0),
tableIdList_(heap),
rcb_(NULL),
rcbLen_(0),
keyLength_(0),
parentTableName_(NULL),
sgAttributes_(NULL),
isHive_(TRUE),
isHbase_(FALSE),
isHbaseCell_(FALSE),
isHbaseRow_(FALSE),
isSeabase_(FALSE),
isSeabaseMD_(FALSE),
isSeabasePrivSchemaTable_(FALSE),
isUserUpdatableSeabaseMD_(FALSE),
resetHDFSStatsAfterStmt_(FALSE),
hiveDefaultStringLen_(0),
hiveTableId_(htbl->tblID_),
tableDesc_(NULL),
secKeySet_(heap),
privInfo_(NULL),
newColumns_(heap),
snapshotName_(NULL)
{
NAString tblName = qualifiedName_.getQualifiedNameObj().getQualifiedNameAsString();
NAString mmPhase;
Lng32 preCreateNATableWarnings = CmpCommon::diags()->getNumber(DgSqlCode::WARNING_);
//set heap type
if(heap_ == CmpCommon::statementHeap()){
heapType_ = STATEMENT;
mmPhase = "NATable Init (Stmt) - " + tblName;
}else if (heap_ == CmpCommon::contextHeap()){
heapType_ = CONTEXT;
mmPhase = "NATable Init (Cnxt) - " + tblName;
}else {
heapType_ = OTHER;
mmPhase = "NATable Init (Other) - " + tblName;
}
MonitorMemoryUsage_Enter((char*)mmPhase.data(), heap_, TRUE);
isTrigTempTable_ = FALSE;
insertMode_ =
COM_MULTISET_TABLE_INSERT_MODE; // allow dup, to check
//ComInsertMode::COM_MULTISET_TABLE_INSERT_MODE; // allow dup, to check
//
// Add timestamp information.
//
// To get from Hive
/*
createTime_ = longArrayToInt64(table_desc->body.table_desc.createtime);
redefTime_ = longArrayToInt64(table_desc->body.table_desc.redeftime);
cacheTime_ = longArrayToInt64(table_desc->body.table_desc.cachetime);
*/
  // NATable has a schemaUID column; probably should propagate it.
// for now, set to 0.
schemaUID_ = 0;
// Set the objectUID_
// If the HIVE table has been registered in Trafodion, get the objectUID
// from Trafodion, otherwise, set it to 0.
// TBD - does getQualifiedNameObj handle delimited names correctly?
if ( !fetchObjectUIDForNativeTable(corrName) )
return;
if ( objectUID_ > 0 )
setHasExternalTable(TRUE);
// for HIVE objects, the schema owner and table owner is HIVE_ROLE_ID
if (CmpCommon::context()->isAuthorizationEnabled())
{
owner_ = HIVE_ROLE_ID;
schemaOwner_ = HIVE_ROLE_ID;
}
else
{
owner_ = SUPER_USER;
schemaOwner_ = SUPER_USER;
}
if (hasExternalTable())
setupPrivInfo();
// TBD - if authorization is enabled and there is no external table to store
// privileges, go get privilege information from HIVE metadata ...
// TBD - add a check to verify that the column list coming from HIVE matches
// the column list stored in the external table. Maybe some common method
// that can be used to compare other things as well...
objectType_ = COM_BASE_TABLE_OBJECT;
// to check
partitioningScheme_ = COM_UNKNOWN_PARTITIONING;
// to check
rcb_ = 0;
rcbLen_ = 0;
keyLength_ = 0;
partnsDesc_ = NULL;
//
// Insert a NAColumn in the colArray_ for this NATable for each
// columns_desc from the ARK SMD. Returns TRUE if error creating NAColumns.
//
if (createNAColumns(htbl->getColumns(),
this,
colArray_ /*OUT*/,
heap_))
//coverity[leaked_storage]
return;
//
// Set colcount_ after all possible errors (Binder uses nonzero colcount
// as an indicator of valid table definition).
//
// To set it via the new createNAColumns()
colcount_ = colArray_.entries();
// compute record length from colArray
Int32 recLen = 0;
for ( CollIndex i=0; i<colcount_; i++ ) {
recLen += colArray_[i]->getType()->getNominalSize();
}
setRecordLength(recLen);
if (createNAFileSets(htbl /*IN*/,
this /*IN*/,
colArray_ /*IN*/,
indexes_ /*OUT*/,
vertParts_ /*OUT*/,
clusteringIndex_ /*OUT*/,
tableIdList_ /*OUT*/,
heap_,
bindWA
)) {
colcount_ = 0; // indicates failure
return;
}
// HIVE-TBD ignore constraint info creation for now
// If there is a host variable associated with this table, store it
// for use by the generator to generate late-name resolution information.
//
HostVar *hv = corrName.getPrototype();
prototype_ = hv ? new (heap_) HostVar(*hv) : NULL;
// MV
// Initialize the MV support data members
isAnMV_ = FALSE;
isAnMVMetaData_ = FALSE;
Lng32 postCreateNATableWarnings = CmpCommon::diags()->getNumber(DgSqlCode::WARNING_);
if(postCreateNATableWarnings != preCreateNATableWarnings)
tableConstructionHadWarnings_=TRUE;
hiveDefaultStringLen_ = CmpCommon::getDefaultLong(HIVE_MAX_STRING_LENGTH);
if (!(corrName.isSeabaseMD() || corrName.isSpecialTable()))
setupPrivInfo();
// LCOV_EXCL_STOP
initialSize_ = heap_->getAllocSize();
MonitorMemoryUsage_Exit((char*)mmPhase.data(), heap_, NULL, TRUE);
} // NATable()
#pragma warn(770) // warning elimination
NABoolean NATable::doesMissingStatsWarningExist(CollIndexSet & colsSet) const
{
return colsWithMissingStats_->contains(&colsSet);
}
NABoolean NATable::insertMissingStatsWarning(CollIndexSet colsSet) const
{
CollIndexSet * setOfColsWithMissingStats = new (STMTHEAP) CollIndexSet (colsSet);
Int32 someVar = 1;
CollIndexSet * result = colsWithMissingStats_->insert(setOfColsWithMissingStats, &someVar);
if (result == NULL)
return FALSE;
else
return TRUE;
}
// This gets called in the Optimizer phase -- the Binder phase will already have
// marked columns that were referenced in the query, so that the ustat function
// below can decide which histograms and histints to leave in the stats list
// and which to remove.
//
StatsList &
NATable::getStatistics()
{
if (!statsFetched_)
{
// mark the kind of histograms needed for this table's columns
markColumnsForHistograms();
NAString tblName = qualifiedName_.getQualifiedNameObj().getQualifiedNameAsString();
NAString mmPhase = "NATable getStats - " + tblName;
MonitorMemoryUsage_Enter((char*)mmPhase.data(), NULL, TRUE);
      //getting statistics for a new statement; allocate colStats_
colStats_ = new (CmpCommon::statementHeap()) StatsList(CmpCommon::statementHeap());
// Do not create statistics on the fly for the following tables
if (isAnMV() || isUMDTable() ||
isSMDTable() || isMVUMDTable() ||
isTrigTempTable() )
CURRSTMT_OPTDEFAULTS->setHistDefaultSampleSize(0);
CURRCONTEXT_HISTCACHE->getHistograms(*this);
if ((*colStats_).entries() > 0)
originalCardinality_ = (*colStats_)[0]->getRowcount();
else
originalCardinality_ = ActiveSchemaDB()->getDefaults().getAsDouble(HIST_NO_STATS_ROWCOUNT);
// -----------------------------------------------------------------------
// So now we have read in the contents of the HISTOGRM & HISTINTS
// tables from the system catalog. Before we can use them, we need
// to massage them into a format we can use. In particular, we need
// to make sure that what we read in (which the user may have mucked
// about with) matches the histogram classes' internal semantic
// requirements. Also, we need to generate the MultiColumnUecList.
// ----------------------------------------------------------------------
// what did the user set as the max number of intervals?
NADefaults &defs = ActiveSchemaDB()->getDefaults();
CollIndex maxIntervalCount = defs.getAsLong(HIST_MAX_NUMBER_OF_INTERVALS);
//-----------------------------------------------------------------------------------
// Need to flag the MC colStatsDesc so it is only used for the range partitioning task
    // and not for any cardinality calculation tasks. Flagging it also makes the logic
    // to check for the presence of this MC easier (at the time we need to create
    // the range partitioning function)
//-----------------------------------------------------------------------------------
if (CmpCommon::getDefault(HBASE_RANGE_PARTITIONING_MC_SPLIT) == DF_ON &&
!(*colStats_).allFakeStats())
{
CollIndex currentMaxsize = 1;
Int32 posMCtoUse = -1;
NAColumnArray partCols;
if (getClusteringIndex()->getPartitioningKeyColumns().entries() > 0)
partCols = getClusteringIndex()->getPartitioningKeyColumns();
else
partCols = getClusteringIndex()->getIndexKeyColumns();
CollIndex partColNum = partCols.entries();
      // look for MC histograms that have multiple intervals and whose columns are a prefix of the
      // partition column list. If there are multiple candidates, pick the one with the most matching columns
for (Int32 i=0; i < (*colStats_).entries(); i++)
{
NAColumnArray statsCols = (*colStats_)[i]->getStatColumns();
CollIndex colNum = statsCols.entries();
CollIndex j = 0;
NABoolean potentialMatch = TRUE;
if ((colNum > currentMaxsize) &&
             (!(*colStats_)[i]->isSingleIntHist()) && // no SIH -- the number of intervals must be large enough to do splitting
(colNum <= partColNum))
{
while ((j < colNum) && potentialMatch)
{
j++;
NAColumn * col = partCols[j-1];
if (statsCols[j-1]->getPosition() != partCols[j-1]->getPosition())
{
potentialMatch = FALSE;
break;
}
}
}
else
{
potentialMatch = FALSE;
}
if (potentialMatch)
{
currentMaxsize = j;
posMCtoUse = i;
}
// we got what we need, just return
if (potentialMatch && (currentMaxsize == partColNum))
{
break;
}
}
if (posMCtoUse >= 0)
{
(*colStats_)[posMCtoUse]->setMCforHbasePartitioning (TRUE);
}
}
// *************************************************************************
// FIRST: Generate the stats necessary to later create the
// MultiColumnUecList; then filter out the multi-column histograms
// because later code doesn't know how to handle them
// In the same loop, also mark another flag for originally fake histogram
// This is to differentiate the cases when the histogram is fake because
// it has no statistics and the case where the histogram has been termed
// fake by the optimizer because its statistics is no longer reliable.
// *************************************************************************
CollIndex i ;
for ( i = 0 ; i < (*colStats_).entries() ; /* no automatic increment */ )
{
// the StatsList has two lists which it uses to store the information we
// need to fill the MultiColumnUecList with <table-col-list,uec value> pairs:
//
// LIST(NAColumnArray) groupUecColumns_
// LIST(CostScalar) groupUecValues_
//
// ==> insert the NAColumnArray & uec total values for each
// entry in colStats_
      // don't bother storing multi-column UEC info for fake histograms,
      // but do set the originally-fake-histogram flag to TRUE
if ( (*colStats_)[i]->isFakeHistogram() )
(*colStats_)[i]->setOrigFakeHist(TRUE);
else
{
NAColumnArray cols = (*colStats_)[i]->getStatColumns() ;
(*colStats_).groupUecColumns_.insert(cols) ;
CostScalar uecs = (*colStats_)[i]->getTotalUec() ;
(*colStats_).groupUecValues_.insert(uecs) ;
if (CmpCommon::getDefault(USTAT_COLLECT_MC_SKEW_VALUES) == DF_ON)
{
MCSkewedValueList mcSkewedValueList = (*colStats_)[i]->getMCSkewedValueList() ;
(*colStats_).groupMCSkewedValueLists_.insert(mcSkewedValueList) ;
}
}
// MCH:
// once we've stored the column/uec information, filter out the
// multi-column histograms, since our synthesis code doesn't
// handle them
if (( (*colStats_)[i]->getStatColumns().entries() != 1) &&
(!(*colStats_)[i]->isMCforHbasePartitioning()))
{
(*colStats_).removeAt(i) ;
}
else
{
i++ ; // in-place removal from a list is a bother!
}
}
// *************************************************************************
// SECOND: do some fixup work to make sure the histograms maintain
// the semantics we later expect (& enforce)
// *************************************************************************
// -------------------------------------------------------------------------
// HISTINT fixup-code : char-string histograms
// -------------------------------------------------------------------------
// problem arises with HISTINTs that are for char* columns
// here's what we can get:
//
// Rows Uec Value
// ---- --- -----
// 0 0 "value"
// 10 5 "value"
//
// this is not good! The problem is our (lousy) encoding of
// char strings into EncodedValue's
//
// After much deliberation, here's our current fix:
//
// Rows Uec Value
// ---- --- -----
// 0 0 "valu" <-- reduce the min value of 1st interval
// 10 5 "value" by a little bit
//
// When we find two intervals like this where they aren't the
// first intervals in the histogram, we simply merge them into
// one interval (adding row/uec information) and continue; note
// that in this case, we haven't actually lost any information;
// we've merely made sense out of (the garbage) what we've got
//
// -------------------------------------------------------------------------
// additional HISTINT fixup-code
// -------------------------------------------------------------------------
// 1. If there are zero or one HISTINTs, then set the HISTINTs to match
// the max/min information contained in the COLSTATS object.
//
// 2. If there are any HISTINTs whose boundary values are out-of-order,
    //    we abort with an ERROR message.
//
// 3. If there is a NULL HISTINT at the end of the Histogram, then we
// need to make sure there are *TWO* NULL HISTINTS, to preserve correct
// histogram semantics for single-valued intervals.
// -------------------------------------------------------------------------
CollIndex j ;
for ( i = 0 ; i < (*colStats_).entries() ; i++ )
{
// we only worry about histograms on char string columns
// correction: it turns out that these semantically-deranged
// ---------- histograms were being formed for other, non-char string
// columns, so we commented out the code below
// if ( colStats_[i]->getStatColumns()[0]->getType()->getTypeQualifier() !=
// NA_CHARACTER_TYPE)
// continue ; // not a string, skip to next
ColStatsSharedPtr stats = (*colStats_)[i] ;
HistogramSharedPtr hist = stats->getHistogramToModify() ;
// histograms for key columns of a table that are not
// referenced in the query are read in with zero intervals
// (to conserve memory); however, internal
// histogram-semantic checking code assumes that any
// histogram which has zero intervals is FAKE; however
// however, MDAM will not be chosen in the case where one of
// the histograms for a key column is FAKE. Thus -- we will
// avoid this entire issue by creating a single interval for
// any Histograms that we read in that are empty.
if ( hist->entries() < 2 )
{
if(stats->getMinValue() > stats->getMaxValue())
{
*CmpCommon::diags() << DgSqlCode(CATALOG_HISTOGRM_HISTINTS_TABLES_CONTAIN_BAD_VALUE)
<< DgString0("")
<< DgString1(stats->getStatColumns()[0]->getFullColRefNameAsAnsiString().data() );
stats->createFakeHist();
continue;
}
stats->setToSingleInterval ( stats->getMinValue(),
stats->getMaxValue(),
stats->getRowcount(),
stats->getTotalUec() ) ;
// now we have to undo some of the automatic flag-setting
// of ColStats::setToSingleInterval()
stats->setMinSetByPred (FALSE) ;
stats->setMaxSetByPred (FALSE) ;
stats->setShapeChanged (FALSE) ;
continue ; // skip to next ColStats
}
// NB: we'll handle the first Interval last
for ( j = 1 ; j < hist->entries()-1 ; /* no automatic increment */ )
{
if ( (*hist)[j].getUec() == 0 || (*hist)[j].getCardinality() == 0 )
{
hist->removeAt(j) ;
continue ; // don't increment, loop again
}
// intervals must be in order!
if ( (*hist)[j].getBoundary() > (*hist)[j+1].getBoundary() )
{
*CmpCommon::diags() <<
DgSqlCode(CATALOG_HISTINTS_TABLES_CONTAIN_BAD_VALUES)
<< DgInt0(j)
<< DgInt1(j+1)
<< DgString1(stats->getStatColumns()[0]->getFullColRefNameAsAnsiString().data() );
stats->createFakeHist();
break ; // skip to next ColStats
}
if ( (*hist)[j].getBoundary() == (*hist)[j+1].getBoundary() )
{
// merge Intervals, if the two consecutive intervals have same
// boundaries and these are not single valued (UEC > 1)
            // If there are more than two single valued intervals, then merge
            // all except the last one.
NABoolean mergeIntervals = FALSE;
if (CmpCommon::getDefault(COMP_BOOL_79) == DF_ON)
{
mergeIntervals = TRUE;
if( (j < (hist->entries() - 2)) && ((*hist)[j+1].getUec() == 1) &&
((*hist)[j+1].getBoundary() != (*hist)[j+2].getBoundary())
||
(j == (hist->entries() - 2)) && ((*hist)[j+1].getUec() == 1) )
mergeIntervals = FALSE;
}
else
{
if ( (*hist)[j+1].getUec() > 1)
mergeIntervals = TRUE;
}
if ( mergeIntervals )
{
// if the intervals with same boundary are not SVI, just merge them
// together.
// Also do the merge, if there are more than one SVIs with same
// encoded interval boundary. Example, we want to avoid intervals
// such as
// boundary inclusive_flag UEC
// 12345.00 < 1
// 12345.00 < 1
// 12345.00 <= 1
// These would be changed to
// 12345.00 < 2
// 12345.00 <= 1
CostScalar combinedRows = (*hist)[ j ].getCardinality() +
(*hist)[j+1].getCardinality() ;
CostScalar combinedUec = (*hist)[ j ].getUec() +
(*hist)[j+1].getUec() ;
(*hist)[j].setCardAndUec (combinedRows, combinedUec) ;
stats->setIsColWithBndryConflict(TRUE);
hist->removeAt(j+1) ;
}
else
{
// for some reason, some SVI's aren't being
// generated correctly!
(*hist)[j].setBoundIncl(FALSE) ;
(*hist)[j+1].setBoundIncl(TRUE) ;
j++;
}
}
else
j++ ; // in-place removal from a list is a bother!
} // loop over intervals
// ----------------------------------------------------------------------
// now we handle the first interval
//
// first, it must be in order w.r.t. the second interval!
if ( (*hist)[0].getBoundary() > (*hist)[1].getBoundary() )
{
*CmpCommon::diags() <<
DgSqlCode(CATALOG_HISTINTS_TABLES_CONTAIN_BAD_VALUES)
<< DgInt0(0)
<< DgInt1(1)
<< DgString1(stats->getStatColumns()[0]->getFullColRefNameAsAnsiString().data() );
stats->createFakeHist();
continue ; // skip to next ColStats
}
// second, handle the case where first and second interval are the same
if ( hist->entries() > 1 && // avoid the exception! might just be a single NULL
// // interval after the loop above
(*hist)[0].getBoundary() == (*hist)[1].getBoundary() &&
(*hist)[1].getUec() > 1 )
{
const double KLUDGE_VALUE = 0.0001 ;
const double oldVal = (*hist)[0].getBoundary().getDblValue() ;
const EncodedValue newVal =
EncodedValue(oldVal - (_ABSOLUTE_VALUE_(oldVal) * KLUDGE_VALUE)) ; // kludge alert!
//Absolute of oldval due to CR 10-010426-2457
(*hist)[0].setBoundary( newVal ) ;
(*hist)[0].setBoundIncl( FALSE ) ; // no longer a real boundary!
(*colStats_)[i]->setMinValue( newVal ) ; // set aggr info also
}
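      // Illustration (hypothetical numbers): if both boundaries encode to
      // 100.0, the first interval's boundary is nudged down to 99.99 and
      // marked non-inclusive, so the two intervals no longer collide.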
// done with first interval
// ----------------------------------------------------------------------
//
// NULL values must only be stored in single-valued intervals
// in the histograms ; so, just in case we're only getting
// *one* HistInt for the NULL interval, insert a 2nd one
//
// 0 1 2
// | | |
// | | | entries() == 3
// NULL
//
// 0 1 2 3
// | | | |
// | | | | entries() == 4
// new NULL
// NULL
//
if ( hist->lastHistInt().isNull() )
{
CollIndex count = hist->entries() ;
if ( !(*hist)[count-2].isNull() )
{
// insert a 2nd NULL HISTINT, with boundaryIncl value FALSE
HistInt secondLast (hist->lastHistInt().getBoundary(), FALSE) ;
hist->insertAt(count-1,secondLast) ;
// new HISTINT by default has row/uec of 0, which is what we want
}
}
//
// Now, reduce the total number of intervals to be the number
// that the user wants. This is used to test the tradeoffs
// between compile time & rowcount estimation.
//
(*colStats_)[i]->setMaxIntervalCount (maxIntervalCount) ;
(*colStats_)[i]->reduceToMaxIntervalCount () ;
if ((*colStats_)[i]->getRowcount() == (*colStats_)[i]->getTotalUec() )
(*colStats_)[i]->setAlmostUnique(TRUE);
} // outer for loop -- done with this COLSTATS, continue with next one
// ***********************************************************************
statsFetched_ = TRUE;
MonitorMemoryUsage_Exit((char*)mmPhase.data(), NULL, NULL, TRUE);
} // !statsFetched_
return (*colStats_);
}
StatsList &
NATable::generateFakeStats()
{
if (colStats_ == NULL)
{
    //getting statistics for a new statement; allocate colStats_
colStats_ = new (CmpCommon::statementHeap()) StatsList(CmpCommon::statementHeap());
}
if (colStats_->entries() > 0)
return (*colStats_);
NAColumnArray colList = getNAColumnArray() ;
double defaultFakeRowCount = (ActiveSchemaDB()->getDefaults()).getAsDouble(HIST_NO_STATS_ROWCOUNT);
double defaultFakeUec = (ActiveSchemaDB()->getDefaults()).getAsDouble(HIST_NO_STATS_UEC);
if ( isHiveTable() ) {
defaultFakeRowCount = getOriginalRowCount().value();
}
/* if ( isHbaseTable() ) {
defaultFakeRowCount = getOriginalRowCount().value();
}
*/
for (CollIndex i = 0; i < colList.entries(); i++ )
{
NAColumn * col = colList[i];
if (col->isUnique() )
defaultFakeUec = defaultFakeRowCount;
else
defaultFakeUec = MINOF(defaultFakeUec, defaultFakeRowCount);
EncodedValue dummyVal(0.0);
EncodedValue lowBound = dummyVal.minMaxValue(col->getType(), TRUE);
EncodedValue highBound = dummyVal.minMaxValue(col->getType(), FALSE);
HistogramSharedPtr emptyHist(new (HISTHEAP) Histogram(HISTHEAP));
HistInt newFirstHistInt(lowBound, FALSE);
HistInt newSecondHistInt(highBound, TRUE);
newSecondHistInt.setCardAndUec(defaultFakeRowCount,
defaultFakeUec);
emptyHist->insert(newFirstHistInt);
emptyHist->insert(newSecondHistInt);
ComUID histid(NA_JulianTimestamp());
ColStatsSharedPtr fakeColStats(
new (HISTHEAP) ColStats(histid,
defaultFakeUec,
defaultFakeRowCount,
defaultFakeRowCount,
col->isUnique(),
FALSE,
emptyHist,
FALSE,
1.0,
1.0,
-1, // avg varchar size
HISTHEAP));
fakeColStats->setFakeHistogram(TRUE);
fakeColStats->setOrigFakeHist(TRUE);
fakeColStats->setMinValue(lowBound);
fakeColStats->setMaxValue(highBound);
fakeColStats->statColumns().insert(col);
colStats_->insert(fakeColStats);
}
setStatsFetched(TRUE);
setOriginalRowCount(defaultFakeRowCount);
return (*colStats_);
}
NABoolean NATable::rowsArePacked() const
{
// If one fileset is packed, they all are
return (getVerticalPartitionList().entries() &&
getVerticalPartitionList()[0]->isPacked());
}
// MV
// Read materialized view information from the catalog manager.
MVInfoForDML *NATable::getMVInfo(BindWA *bindWA)
{
return mvInfo_;
}
// MV
// An MV is usable only when it is initialized and not unavailable.
// If not initialized, keep a list and report error at runtime.
NABoolean NATable::verifyMvIsInitializedAndAvailable(BindWA *bindWA) const
{
CMPASSERT(isAnMV());
const ComMvAttributeBitmap& bitmap = getMvAttributeBitmap();
// First check if the table is Unavailable.
NAString value;
if (bitmap.getIsMvUnAvailable())
{
// 12312 Materialized View $0~TableName is unavailable.
*CmpCommon::diags() << DgSqlCode(-12312)
<< DgTableName(getTableName().getQualifiedNameAsString());
bindWA->setErrStatus();
return TRUE;
}
// if the mv is uninitialized,
// add it to the uninitializedMvList in the BindWA
if (bitmap.getIsMvUnInitialized())
{
// get physical and ansi names
NAString fileName(
getClusteringIndex()->getFileSetName().getQualifiedNameAsString(),
bindWA->wHeap() );
NAString ansiName( getTableName().getQualifiedNameAsAnsiString(),
bindWA->wHeap() );
// get physical and ansi name
bindWA->addUninitializedMv(
convertNAString( fileName, bindWA->wHeap() ),
convertNAString( ansiName, bindWA->wHeap() ) );
}
return FALSE;
}
// Return value: TRUE, found an index or constr. FALSE, not found.
// explicitIndex: get explicitly created index
// uniqueIndex: TRUE, get unique index. FALSE, any index.
//
// primaryKeyOnly: TRUE, get primary key
// indexName: return index name, if passed in
// lookForSameSequenceOfCols: TRUE, look for an index in which the
// columns appear in the same sequence
// as in inputCols (whether they are ASC or
// DESC doesn't matter).
// FALSE, accept any index that has the
// same columns, in any sequence.
NABoolean NATable::getCorrespondingIndex(NAList<NAString> &inputCols,
NABoolean lookForExplicitIndex,
NABoolean lookForUniqueIndex,
NABoolean lookForPrimaryKey,
NABoolean lookForAnyIndexOrPkey,
NABoolean lookForSameSequenceOfCols,
NABoolean excludeAlwaysComputedSystemCols,
NAString *indexName)
{
NABoolean indexFound = FALSE;
CollIndex numInputCols = inputCols.entries();
if (numInputCols == 0)
{
lookForPrimaryKey = TRUE;
lookForUniqueIndex = FALSE;
lookForAnyIndexOrPkey = FALSE;
}
Lng32 numBTpkeys = getClusteringIndex()->getIndexKeyColumns().entries();
const NAFileSetList &indexList = getIndexList();
for (Int32 i = 0; (NOT indexFound && (i < indexList.entries())); i++)
{
NABoolean isPrimaryKey = FALSE;
NABoolean isUniqueIndex = FALSE;
const NAFileSet * naf = indexList[i];
if (naf->getKeytag() == 0)
isPrimaryKey = TRUE;
else if (naf->uniqueIndex())
isUniqueIndex = TRUE;
if ((NOT lookForPrimaryKey) && (isPrimaryKey))
continue;
NABoolean found = FALSE;
if (lookForAnyIndexOrPkey)
found = TRUE;
else if (lookForPrimaryKey && isPrimaryKey)
found = TRUE;
else if (lookForUniqueIndex && isUniqueIndex)
found = TRUE;
if (found)
{
if (lookForExplicitIndex) // need an explicit index to match.
{
if ((naf->isCreatedExplicitly()) ||
(isPrimaryKey))
found = TRUE;
else
found = FALSE;
}
}
if (NOT found)
continue;
Int32 numMatchedCols = 0;
NABoolean allColsMatched = TRUE;
if (numInputCols > 0)
{
const NAColumnArray &nacArr = naf->getIndexKeyColumns();
Lng32 numKeyCols = naf->getCountOfColumns(
TRUE, // exclude non-key cols
!isPrimaryKey, // exclude cols other than user-specified index cols
FALSE, // don't exclude all system cols like SYSKEY
excludeAlwaysComputedSystemCols);
// compare # of columns first and disqualify the index
// if it doesn't have the right number of columns
if (numInputCols != numKeyCols)
continue;
// compare individual key columns with the provided input columns
for (Int32 j = 0; j < nacArr.entries() && allColsMatched; j++)
{
NAColumn *nac = nacArr[j];
// exclude the same types of columns that we excluded in
// the call to naf->getCountOfColumns() above
if (!isPrimaryKey &&
nac->getIndexColName() == nac->getColName())
continue;
if (excludeAlwaysComputedSystemCols &&
nac->isComputedColumnAlways() && nac->isSystemColumn())
continue;
const NAString &keyColName = nac->getColName();
NABoolean colFound = FALSE;
// look up the key column name in the provided input columns
if (lookForSameSequenceOfCols)
{
// in this case we know exactly where to look
colFound = (keyColName == inputCols[numMatchedCols]);
}
else
for (Int32 k = 0; !colFound && k < numInputCols; k++)
{
if (keyColName == inputCols[k])
colFound = TRUE;
} // loop over provided input columns
if (colFound)
numMatchedCols++;
else
allColsMatched = FALSE;
} // loop over key columns of the index
if (allColsMatched)
{
// just checking that the above loop and
// getCountOfColumns() don't disagree
CMPASSERT(numMatchedCols == numKeyCols);
indexFound = TRUE;
}
} // inputCols specified
else
indexFound = TRUE; // primary key, no input cols specified
if (indexFound)
{
if (indexName)
{
*indexName = naf->getExtFileSetName();
}
}
} // loop over indexes of the table
return indexFound;
}
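// A minimal usage sketch (hypothetical caller and column names, kept as a
// comment): check whether the primary key or some unique index covers
// columns (A, B) in any order, and fetch its name:
//
//   NAList<NAString> cols(bindWA->wHeap());
//   cols.insert("A");
//   cols.insert("B");
//   NAString idxName;
//   NABoolean found = naTable->getCorrespondingIndex(
//        cols,
//        FALSE,      // lookForExplicitIndex
//        TRUE,       // lookForUniqueIndex
//        TRUE,       // lookForPrimaryKey
//        FALSE,      // lookForAnyIndexOrPkey
//        FALSE,      // lookForSameSequenceOfCols
//        TRUE,       // excludeAlwaysComputedSystemCols
//        &idxName);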
NABoolean NATable::getCorrespondingConstraint(NAList<NAString> &inputCols,
NABoolean uniqueConstr,
NAString *constrName,
NABoolean * isPkey,
NAList<int> *reorderList)
{
NABoolean constrFound = FALSE;
NABoolean lookForPrimaryKey = (inputCols.entries() == 0);
const AbstractRIConstraintList &constrList =
(uniqueConstr ? getUniqueConstraints() : getRefConstraints());
if (isPkey)
*isPkey = FALSE;
for (Int32 i = 0; (NOT constrFound && (i < constrList.entries())); i++)
{
AbstractRIConstraint *ariConstr = constrList[i];
if (uniqueConstr && (ariConstr->getOperatorType() != ITM_UNIQUE_CONSTRAINT))
continue;
if (lookForPrimaryKey && (NOT ((UniqueConstraint*)ariConstr)->isPrimaryKeyConstraint()))
continue;
if ((NOT uniqueConstr) && (ariConstr->getOperatorType() != ITM_REF_CONSTRAINT))
continue;
if (NOT lookForPrimaryKey)
{
Int32 numUniqueCols = 0;
NABoolean allColsMatched = TRUE;
NABoolean reorderNeeded = FALSE;
if (reorderList)
reorderList->clear();
for (Int32 j = 0; j < ariConstr->keyColumns().entries() && allColsMatched; j++)
{
// The RI constraint contains a dummy NAColumn, get to the
// real one to test for computed columns
NAColumn *nac = getNAColumnArray()[ariConstr->keyColumns()[j]->getPosition()];
if (nac->isComputedColumnAlways() && nac->isSystemColumn())
// always computed system columns in the key are redundant,
// don't include them (also don't include them in the DDL)
continue;
const NAString &uniqueColName = (ariConstr->keyColumns()[j])->getColName();
NABoolean colFound = FALSE;
// compare the unique column name to the provided input columns
for (Int32 k = 0; !colFound && k < inputCols.entries(); k++)
if (uniqueColName == inputCols[k])
{
colFound = TRUE;
numUniqueCols++;
if (reorderList)
reorderList->insert(k);
if (j != k)
// inputCols and key columns come in different order
// (order/sequence of column names, ignoring ASC/DESC)
reorderNeeded = TRUE;
}
if (!colFound)
allColsMatched = FALSE;
}
if (inputCols.entries() == numUniqueCols && allColsMatched)
{
constrFound = TRUE;
if (reorderList && !reorderNeeded)
reorderList->clear();
}
}
else
{
// found the primary key constraint we were looking for
constrFound = TRUE;
}
if (constrFound)
{
if (constrName)
{
*constrName = ariConstr->getConstraintName().getQualifiedNameAsAnsiString();
}
if (isPkey)
{
if ((uniqueConstr) && (((UniqueConstraint*)ariConstr)->isPrimaryKeyConstraint()))
*isPkey = TRUE;
}
}
else
if (reorderList)
reorderList->clear();
}
return constrFound;
}
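// Set up privilege information (privInfo_) for the current user on this
// table. Privilege manager metadata tables are only flagged. Default owner
// privileges are assigned when no check is needed (authorization disabled,
// volatile table, root user, or current user is the owner); otherwise the
// privileges and security keys are fetched through the PrivMgr interface
// using the embedded metadata compiler.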
void NATable::setupPrivInfo()
{
Int32 thisUserID = ComUser::getCurrentUser();
NAString privMDLoc = CmpSeabaseDDL::getSystemCatalogStatic();
privMDLoc += ".\"";
privMDLoc += SEABASE_PRIVMGR_SCHEMA;
privMDLoc += "\"";
PrivMgrCommands privInterface(privMDLoc.data(), CmpCommon::diags(),PrivMgr::PRIV_INITIALIZED);
if (privInterface.isPrivMgrTable(
qualifiedName_.getQualifiedNameObj().getQualifiedNameAsString().data()))
{
isSeabasePrivSchemaTable_ = TRUE;
return;
}
privInfo_ = new(heap_) PrivMgrUserPrivs;
if ((!isSeabaseTable() && !isHiveTable()) ||
!CmpCommon::context()->isAuthorizationEnabled() ||
isVolatileTable() ||
ComUser::isRootUserID()||
ComUser::getCurrentUser() == owner_)
{
privInfo_->setOwnerDefaultPrivs();
return;
}
std::vector <ComSecurityKey *> secKeyVec;
bool testError = false;
#ifndef NDEBUG
char *tpie = getenv("TEST_PRIV_INTERFACE_ERROR");
if (tpie && *tpie == '1')
testError = true;
#endif
// use embedded compiler.
CmpSeabaseDDL cmpSBD(STMTHEAP);
if (cmpSBD.switchCompiler(CmpContextInfo::CMPCONTEXT_TYPE_META))
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
*CmpCommon::diags() << DgSqlCode( -4400 );
return;
}
if (testError || (STATUS_GOOD !=
privInterface.getPrivileges(objectUid().get_value(), objectType_,
thisUserID, *privInfo_, &secKeyVec)))
{
if (testError)
#ifndef NDEBUG
*CmpCommon::diags() << DgSqlCode(-8142) <<
DgString0("TEST_PRIV_INTERFACE_ERROR") << DgString1(tpie) ;
#else
abort();
#endif
NADELETE(privInfo_, PrivMgrUserPrivs, heap_);
privInfo_ = NULL;
cmpSBD.switchBackCompiler();
return;
}
CMPASSERT (privInfo_);
cmpSBD.switchBackCompiler();
for (std::vector<ComSecurityKey*>::iterator iter = secKeyVec.begin();
iter != secKeyVec.end();
iter++)
{
// Insertion of the dereferenced pointer results in NASet making
// a copy of the object, and then we delete the original.
secKeySet_.insert(**iter);
delete *iter;
}
}
// Query the metadata to find the object uid of the table. This is used when
// the uid for a metadata table is requested, since 0 is usually stored for
// these tables.
//
Int64 NATable::lookupObjectUid()
{
QualifiedName qualName = getExtendedQualName().getQualifiedNameObj();
objectUID_ = lookupObjectUidByName(qualName, objectType_, FALSE);
if (objectUID_ <= 0 && CmpCommon::diags()->mainSQLCODE() >= 0)
// object not found, no serious error
objectUID_ = 0;
return objectUID_.get_value();
}
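// Return whether this table participates in DDL query invalidation (QI).
// Metadata, system metadata and virtual tables do not; all other tables
// are expected to have a valid object UID by this time.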
bool NATable::isEnabledForDDLQI() const
{
if (isSeabaseMD_ || isSMDTable_ || (getSpecialType() == ExtendedQualName::VIRTUAL_TABLE))
return false;
else
{
if (objectUID_.get_value() == 0)
{
// Looking up object UIDs at code-gen time was shown to cause
// more than 10% performance regression in YCSB benchmark. In
// that investigation, we learned that metadata and histogram
// NATables would have no object UID at code-gen and would
// require the lookup. We're pretty sure these are the only
// types of tables but will abend here otherwise. If this
// causes problems, the envvar below can be used as a
// temporary workaround.
char *noAbendOnLp1398600 = getenv("NO_ABEND_ON_LP_1398600");
if (!noAbendOnLp1398600 || *noAbendOnLp1398600 == '0')
abort();
}
return true;
}
}
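// Destructor: release members allocated on this NATable's heap (columns,
// filesets, constraints, view- and partition-related strings). Objects
// allocated on the statement heap, such as colStats_, are freed when that
// heap is destroyed.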
NATable::~NATable()
{
// remove the map entries of associated table identifiers in
// NAClusterInfo::tableToClusterMap_.
CMPASSERT(gpClusterInfo);
NAColumn *col;
NABoolean delHeading = ActiveSchemaDB()->getNATableDB()->cachingMetaData();
const LIST(CollIndex) & tableIdList = getTableIdList();
if (privInfo_)
{
NADELETE(privInfo_, PrivMgrUserPrivs, heap_);
privInfo_ = NULL;
}
if (! isHive_) {
for (int i = 0 ; i < colcount_ ; i++) {
col = (NAColumn *)colArray_[i];
if (delHeading) {
if (col->getDefaultValue())
NADELETEBASIC(col->getDefaultValue(), heap_);
if (col->getHeading())
NADELETEBASIC(col->getHeading(), heap_);
if (col->getComputedColumnExprString())
NADELETEBASIC(col->getComputedColumnExprString(),heap_);
}
NADELETE(col->getType(), NAType, heap_);
NADELETE(col, NAColumn, heap_);
}
colArray_.clear();
}
if (parentTableName_ != NULL)
{
NADELETEBASIC(parentTableName_, heap_);
parentTableName_ = NULL;
}
if (snapshotName_ != NULL)
{
NADELETEBASIC(snapshotName_, heap_);
snapshotName_ = NULL;
}
if (viewText_ != NULL)
{
NADELETEBASIC(viewText_, heap_);
viewText_ = NULL;
}
if (viewCheck_ != NULL)
{
NADELETEBASIC(viewCheck_, heap_);
viewCheck_ = NULL;
}
if (viewFileName_ != NULL)
{
NADELETEBASIC(viewFileName_, heap_);
viewFileName_ = NULL;
}
if (prototype_ != NULL)
{
NADELETE(prototype_, HostVar, heap_);
prototype_ = NULL;
}
if (sgAttributes_ != NULL)
{
NADELETE(sgAttributes_, SequenceGeneratorAttributes, heap_);
sgAttributes_ = NULL;
}
// clusteringIndex_ is part of indexes - No need to delete clusteringIndex_
CollIndex entryCount = indexes_.entries();
for (CollIndex i = 0 ; i < entryCount; i++) {
NADELETE(indexes_[i], NAFileSet, heap_);
}
indexes_.clear();
entryCount = vertParts_.entries();
for (CollIndex i = 0 ; i < entryCount; i++) {
NADELETE(vertParts_[i], NAFileSet, heap_);
}
vertParts_.clear();
entryCount = newColumns_.entries();
for (int i = 0 ; i < entryCount ; i++) {
col = (NAColumn *)newColumns_[i];
NADELETE(col, NAColumn, heap_);
}
newColumns_.clear();
entryCount = checkConstraints_.entries();
for (CollIndex i = 0 ; i < entryCount; i++) {
NADELETE(checkConstraints_[i], CheckConstraint, heap_);
}
checkConstraints_.clear();
entryCount = uniqueConstraints_.entries();
for (CollIndex i = 0 ; i < entryCount; i++) {
NADELETE((UniqueConstraint *)uniqueConstraints_[i], UniqueConstraint, heap_);
}
uniqueConstraints_.clear();
entryCount = refConstraints_.entries();
for (CollIndex i = 0 ; i < entryCount; i++) {
NADELETE((RefConstraint *)refConstraints_[i], RefConstraint, heap_);
}
refConstraints_.clear();
entryCount = mvsUsingMe_.entries();
for (CollIndex i = 0 ; i < entryCount; i++) {
NADELETE(mvsUsingMe_[i], UsingMvInfo, heap_);
}
mvsUsingMe_.clear();
// mvInfo_ is not used at all
// tableIDList_ is list of ints - No need to delete the entries
// colStats_ and colsWithMissingStats_ comes from STMTHEAP
// secKeySet_ is the set that holds ComSecurityKeySet object itself
}
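// Reset per-statement state so that a cached NATable can be reused by the
// next statement; persistent metadata members are left untouched.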
void NATable::resetAfterStatement() // ## to be implemented?
{
if(resetAfterStatement_)
return;
//It is not clear to me whether virtual tables and resource forks
//(any "special" table type) can/should be reused. Maybe certain
//types can; I just have no idea right now. But as we're not reading
//metadata tables for them anyway, there seems little savings in
//caching them; perhaps we should just continue to build them on the fly.
//
//All the real metadata in NATable members can stay as it is.
//But there are a few pieces of for-this-query-only data:
referenceCount_ = 0;
refsIncompatibleDP2Halloween_ = FALSE;
isHalloweenTable_ = FALSE;
//And we now optimize/filter/reduce histogram statistics for each
//individual query, so stats and adminicular structures must be reset:
statsFetched_ = FALSE;
//set this to NULL, the object pointed to by mvInfo_ is on the
//statement heap, for the next statement this will be set again
//this is set in 'MVInfoForDML *NATable::getMVInfo' which is called
//in the binder after the construction of the NATable. Therefore
//This will be set for every statement
mvInfo_ = NULL;
//delete/clearAndDestroy colStats_
//set colStats_ pointer to NULL the object itself is deleted when
//the statement heap is disposed at the end of a statement
colStats_ = NULL;
//mark table as unaccessed for following statements
accessedInCurrentStatement_ = FALSE;
//for (i in colArray_) colArray_[i]->setReferenced(FALSE);
for (UInt32 i = 0; i < colArray_.entries(); i++)
{
//reset each NAColumn
if(colArray_[i])
colArray_[i]->resetAfterStatement();
}
//reset the clustering index
if(clusteringIndex_)
clusteringIndex_->resetAfterStatement();
//reset the fileset for indices
for (UInt32 j=0; j < indexes_.entries(); j++)
{
//reset the fileset for each index
if(indexes_[j])
indexes_[j]->resetAfterStatement();
}
//reset the fileset for each vertical partition
for (UInt32 k=0; k < vertParts_.entries(); k++)
{
//reset the fileset for each index
if(vertParts_[k])
vertParts_[k]->resetAfterStatement();
}
// reset the pointers (keyColumns_ in refConstraintsReferencingMe)
// that are referencing the NATable of the 'other table'.
uniqueConstraints_.resetAfterStatement();
// reset the pointers (keyColumns_ in uniqueConstraintsReferencedByMe_)
// that are referencing the NATable of the 'other table'.
refConstraints_.resetAfterStatement();
colsWithMissingStats_ = NULL;
resetAfterStatement_ = TRUE;
setupForStatement_ = FALSE;
sizeAfterLastStatement_ = heap_->getAllocSize();
return;
}
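// Prepare a (possibly cached) NATable for use in the current statement;
// counterpart of resetAfterStatement().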
void NATable::setupForStatement()
{
if(setupForStatement_)
return;
if( NOT qualifiedName_.isSpecialTable() )
gpClusterInfo->setMaxOSV(qualifiedName_.getQualifiedNameObj(), osv_);
//reset the clustering index
if(clusteringIndex_)
clusteringIndex_->setupForStatement();
//reset the fileset for indices
for (UInt32 i=0; i < indexes_.entries(); i++)
{
//reset the fileset for each index
if(indexes_[i])
indexes_[i]->setupForStatement();
}
//reset the fileset for each vertical partition
for (UInt32 j=0; j < vertParts_.entries(); j++)
{
//reset the fileset for each index
if(vertParts_[j])
vertParts_[j]->setupForStatement();
}
// We are doing this here, as we want this to be maintained on a per statement basis
colsWithMissingStats_ = new (STMTHEAP) NAHashDictionary<CollIndexSet, Int32>
(&(hashColPosList),107,TRUE,STMTHEAP);
setupForStatement_ = TRUE;
resetAfterStatement_ = FALSE;
return;
}
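// Helpers to format "(TABLE <name>, PARTITION ...)" strings used in
// partition-related error messages (e.g. error 1097).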
static void formatPartitionNameString(const NAString &tbl,
const NAString &pName,
NAString &fmtOut)
{
fmtOut = NAString("(TABLE ") + tbl +
", PARTITION " + pName + ")";
}
static void formatPartitionNumberString(const NAString &tbl,
Lng32 pNumber,
NAString &fmtOut)
{
char buf[10];
sprintf(buf, "%d", pNumber);
fmtOut = NAString("(TABLE ") + tbl +
", PARTITION NUMBER " + buf + ")";
}
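// Restrict this NATable to the partition(s) named in the PARTITION clause
// of the query. Returns TRUE when nothing is filtered (empty clause, view,
// or a diagnosed error) and FALSE when the restriction was applied.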
NABoolean NATable::filterUnusedPartitions(const PartitionClause& pClause)
{
if (pClause.isEmpty())
return TRUE;
if (getViewText())
{
*CmpCommon::diags()
<< DgSqlCode(-1276)
<< DgString0(pClause.getPartitionName())
<< DgTableName(getTableName().getQualifiedNameAsString());
return TRUE;
}
if ((pClause.partnNumSpecified() && pClause.getPartitionNumber() < 0) ||
(pClause.partnNameSpecified() && IsNAStringSpaceOrEmpty(pClause.getPartitionName())))
// Partition number specified is less than zero or name specified was all blanks.
return TRUE ;
CMPASSERT(indexes_.entries() > 0);
NAFileSet* baseTable = indexes_[0];
PartitioningFunction* oldPartFunc = baseTable->getPartitioningFunction();
CMPASSERT(oldPartFunc);
const NodeMap* oldNodeMap = oldPartFunc->getNodeMap();
CMPASSERT(oldNodeMap);
const NodeMapEntry* oldNodeMapEntry = NULL;
PartitioningFunction* newPartFunc = NULL;
if (pClause.partnRangeSpecified())
{
/* if (NOT oldPartFunc->isAHash2PartitioningFunction())
{
// ERROR 1097 Unable to find specified partition...
*CmpCommon::diags()
<< DgSqlCode(-1097)
<< DgString0("")
<< DgTableName(getTableName().getQualifiedNameAsAnsiString());
return TRUE;
}
*/
NAString errorString;
// partition range specified
if ((pClause.getBeginPartitionNumber() == -1) ||
((pClause.getBeginPartitionNumber() > 0) &&
(oldPartFunc->getCountOfPartitions() >= pClause.getBeginPartitionNumber())))
{
oldPartFunc->setRestrictedBeginPartNumber(
pClause.getBeginPartitionNumber());
}
else
{
formatPartitionNumberString(
getTableName().getQualifiedNameAsAnsiString(),
pClause.getBeginPartitionNumber(), errorString);
}
if ((pClause.getEndPartitionNumber() == -1) ||
((pClause.getEndPartitionNumber() > 0) &&
(oldPartFunc->getCountOfPartitions() >= pClause.getEndPartitionNumber())))
{
oldPartFunc->setRestrictedEndPartNumber(
pClause.getEndPartitionNumber());
}
else
{
formatPartitionNumberString(
getTableName().getQualifiedNameAsAnsiString(),
pClause.getEndPartitionNumber(), errorString);
}
if (NOT errorString.isNull())
{
// ERROR 1097 Unable to find specified partition...
*CmpCommon::diags()
<< DgSqlCode(-1097)
<< DgString0(errorString)
<< DgTableName(getTableName().getQualifiedNameAsAnsiString());
return TRUE;
} // Unable to find specified partition.
} // partition range specified
else
{
// single partition specified
if (pClause.getPartitionNumber() >= 0) // PARTITION NUMBER was specified
{
if ((pClause.getPartitionNumber() > 0) &&
(oldPartFunc->getCountOfPartitions() >= pClause.getPartitionNumber()))
oldNodeMapEntry = oldNodeMap->getNodeMapEntry(pClause.getPartitionNumber()-1);
else
{
NAString errorString;
formatPartitionNumberString(getTableName().getQualifiedNameAsAnsiString(),
pClause.getPartitionNumber(), errorString);
// ERROR 1097 Unable to find specified partition...
*CmpCommon::diags()
<< DgSqlCode(-1097)
<< DgString0(errorString)
<< DgTableName(getTableName().getQualifiedNameAsAnsiString());
return TRUE;
} // Unable to find specified partition.
}
else // PARTITION NAME was specified
{
for (CollIndex i =0; i < oldNodeMap->getNumEntries(); i++)
{
oldNodeMapEntry = oldNodeMap->getNodeMapEntry(i);
if (oldNodeMapEntry->getGivenName() == pClause.getPartitionName())
break;
if ( i == (oldNodeMap->getNumEntries() -1)) // match not found
{
NAString errorString;
formatPartitionNameString(getTableName().getQualifiedNameAsAnsiString(),
pClause.getPartitionName(), errorString);
// ERROR 1097 Unable to find specified partition...
*CmpCommon::diags()
<< DgSqlCode(-1097)
<< DgString0(errorString)
<< DgTableName(getTableName().getQualifiedNameAsAnsiString());
return TRUE;
}
}
}
if (!isHbaseTable())
{
// Create DP2 node map for partitioning function with only the partition requested
NodeMap* newNodeMap = new (heap_) NodeMap(heap_);
NodeMapEntry newEntry((char *)oldNodeMapEntry->getPartitionName(),
(char *)oldNodeMapEntry->getGivenName(),
heap_,oldNodeMap->getTableIdent());
newNodeMap->setNodeMapEntry(0,newEntry,heap_);
newNodeMap->setTableIdent(oldNodeMap->getTableIdent());
/* if (oldPartFunc->getPartitioningFunctionType() ==
PartitioningFunction::ROUND_ROBIN_PARTITIONING_FUNCTION)
{
// For round robin partitioning, must create the partitioning function
// even for one partition, since the SYSKEY must be generated for
// round robin and this is trigger off the partitioning function.
newPartFunc = new (heap) RoundRobinPartitioningFunction(1, newNodeMap, heap_);
}
else */
newPartFunc = new (heap_) SinglePartitionPartitioningFunction(newNodeMap, heap_);
baseTable->setPartitioningFunction(newPartFunc);
baseTable->setCountOfFiles(1);
baseTable->setHasRemotePartitions(checkRemote(NULL,
(char *)oldNodeMapEntry->getPartitionName()));
// for now we are not changing indexlevels_ It could potentially be larger than the
// number of index levels for the requested partition.
QualifiedName physicalName(oldNodeMapEntry->getPartitionName(),
1, heap_, NULL);
baseTable->setFileSetName(physicalName);
}
else
{
// For HBase tables, we attach a predicate to select a single partition in Scan::bindNode
oldPartFunc->setRestrictedBeginPartNumber(pClause.getPartitionNumber());
oldPartFunc->setRestrictedEndPartNumber(pClause.getPartitionNumber());
}
} // single partition specified
return FALSE;
}
const LIST(CollIndex) &
NATable::getTableIdList() const
{
return tableIdList_;
}
void NATable::resetReferenceCount()
{
referenceCount_ = 0;
refsIncompatibleDP2Halloween_ = FALSE;
isHalloweenTable_ = FALSE;
}
void NATable::decrReferenceCount()
{
--referenceCount_;
if (referenceCount_ == 0)
{
refsIncompatibleDP2Halloween_ = FALSE;
isHalloweenTable_ = FALSE;
}
}
CollIndex NATable::getUserColumnCount() const
{
CollIndex result = 0;
for (CollIndex i=0; i<colArray_.entries(); i++)
if (colArray_[i]->isUserColumn())
result++;
return result;
}
// NATableDB function definitions
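// Look up an NATable in the cache by its extended qualified name. Stale or
// incompatible entries are scheduled for deletion and NULL is returned so
// that the caller rebuilds the object; otherwise the entry is marked as
// accessed in the current statement and returned.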
NATable * NATableDB::get(const ExtendedQualName* key, BindWA* bindWA, NABoolean findInCacheOnly)
{
//get the cached NATable entry
NATable * cachedNATable =
NAKeyLookup<ExtendedQualName,NATable>::get(key);
//entry not found in cache
if(!cachedNATable)
return NULL;
//This flag determines if a cached object should be deleted and
//reconstructed
NABoolean removeEntry = FALSE;
if ( cachedNATable->isHbaseTable() ) {
const NAFileSet* naSet = cachedNATable -> getClusteringIndex();
if ( naSet ) {
PartitioningFunction* pf = naSet->getPartitioningFunction();
if ( pf ) {
NABoolean rangeSplitSaltedTable =
CmpCommon::getDefault(HBASE_HASH2_PARTITIONING) == DF_OFF ||
(bindWA && bindWA->isTrafLoadPrep());
// if force to range partition a salted table, and the salted table is
// not a range, do not return the cached object.
if ( rangeSplitSaltedTable &&
cachedNATable->hasSaltedColumn() &&
pf->castToHash2PartitioningFunction() ) {
removeEntry = TRUE;
} else
// if force to hash2 partition a salted table, and the cached table is
// not a hash2, do not return the cached object.
if (
CmpCommon::getDefault(HBASE_HASH2_PARTITIONING) != DF_OFF &&
cachedNATable->hasSaltedColumn() &&
pf->castToHash2PartitioningFunction() == NULL
)
removeEntry = TRUE;
}
}
}
//Found in cache. If that's all the caller wanted, return now.
if ( !removeEntry && findInCacheOnly )
return cachedNATable;
//if this is the first time this cache entry has been accessed
//during the current statement
if( !removeEntry && !cachedNATable->accessedInCurrentStatement())
{
//Note: cachedNATable->labelDisplayKey_ won't be NULL
//for NATable Objects that are in the cache. If the object
//is not a cached object from a previous statement then we
//will not come into this code.
//Read label to get time of last catalog operation
short error = 0;
//Get redef time of table
const Int64 tableRedefTime = cachedNATable->getRedefTime();
//Get last catalog operation time
Int64 labelCatalogOpTime = tableRedefTime;
Int64 rforkCatalogOpTime = tableRedefTime;
Int64 currentSchemaRedefTS = 0;
Int64 cachedSchemaRedefTS = 0;
if (!OSIM_runningSimulation())
{
if ((!cachedNATable->isHiveTable()) &&
(!cachedNATable->isHbaseTable()))
{
} // non-hive table
else if (!cachedNATable->isHbaseTable())
{
// oldest cache entries we will still accept
// Values for CQD HIVE_METADATA_REFRESH_INTERVAL:
// -1: Never invalidate any metadata
// 0: Always check for the latest metadata in the compiler,
// no check in the executor
// >0: Check in the compiler, metadata is valid n seconds
// (n = value of CQD). Recompile plan after n seconds.
// NOTE: n has to be long enough to compile the statement,
// values < 20 or so are impractical.
Int64 refreshInterval =
(Int64) CmpCommon::getDefaultLong(HIVE_METADATA_REFRESH_INTERVAL);
Int32 defaultStringLen =
CmpCommon::getDefaultLong(HIVE_MAX_STRING_LENGTH);
Int64 expirationTimestamp = refreshInterval;
NAString defSchema =
ActiveSchemaDB()->getDefaults().getValue(HIVE_DEFAULT_SCHEMA);
defSchema.toUpper();
if (refreshInterval > 0)
expirationTimestamp = NA_JulianTimestamp() - 1000000 * refreshInterval;
// if default string length changed, don't reuse this entry
if (defaultStringLen != cachedNATable->getHiveDefaultStringLen())
removeEntry = TRUE;
QualifiedName objName = cachedNATable->getTableName();
NAString sName = objName.getSchemaName();
const NAString tName = objName.getObjectName();
// map the Trafodion default Hive schema (usually "HIVE")
// to the name used in Hive (usually "default")
if (objName.getUnqualifiedSchemaNameAsAnsiString() == defSchema)
sName = hiveMetaDB_->getDefaultSchemaName();
// validate Hive table timestamps
if (!hiveMetaDB_->validate(cachedNATable->getHiveTableId(),
cachedNATable->getRedefTime(),
sName.data(), tName.data()))
removeEntry = TRUE;
// validate HDFS stats and update them in-place, if needed
if (!removeEntry)
removeEntry =
! (cachedNATable->getClusteringIndex()->
getHHDFSTableStats()->validateAndRefresh(expirationTimestamp));
}
} // ! osim simulation
//if time of last catalog operation and table redef times
//don't match, then delete this cache entry since it is
//stale.
//if error is non-zero then we were not able to read file
//label and therefore delete this cached entry because
//we cannot ensure it is fresh.
if((CmpCommon::statement()->recompiling())||
(labelCatalogOpTime != tableRedefTime )||
(error)||
(currentSchemaRedefTS != cachedSchemaRedefTS) ||
(!usingCache()) ||
(refreshCacheInThisStatement_) ||
(removeEntry == TRUE)) // to avoid unnecessary read of metadata
{
//mark this entry to be removed
removeEntry = TRUE;
}
} // !cachedNATable->accessedInCurrentStatement()
if(removeEntry)
{
//remove from list of cached NATables
cachedTableList_.remove(cachedNATable);
//remove pointer to NATable from cache
remove(key);
//if metadata caching is ON, then adjust cache size
//since we are deleting a caching entry
if(cacheMetaData_)
currentCacheSize_ = heap_->getAllocSize();
//insert into list of tables that will be deleted
//at the end of the statement after the query has
//been compiled and the plan has been sent to the
//executor. The delete is done in method
//NATableDB::resetAfterStatement(). This basically
//gives a little performance saving because the delete
//won't be part of the compile time as perceived by the
//client of the compiler
tablesToDeleteAfterStatement_.insert(cachedNATable);
return NULL;
}
else {
// Special tables are not added to the statement table list.
if( (NOT cachedNATable->getExtendedQualName().isSpecialTable()) ||
(cachedNATable->getExtendedQualName().getSpecialType() ==
ExtendedQualName::MV_TABLE) ||
(cachedNATable->getExtendedQualName().getSpecialType() ==
ExtendedQualName::GHOST_MV_TABLE) ||
(cachedNATable->getExtendedQualName().getSpecialType() ==
ExtendedQualName::GHOST_INDEX_TABLE) ||
(cachedNATable->getExtendedQualName().getSpecialType() ==
ExtendedQualName::INDEX_TABLE)
)
statementTableList_.insert(cachedNATable);
}
//increment the replacement counter, if not already at max
if(cachedNATable)
{
cachedNATable->replacementCounter_+=2;
//don't let replacementCounter_ go over NATABLE_MAX_REFCOUNT
if(cachedNATable->replacementCounter_ > NATABLE_MAX_REFCOUNT)
cachedNATable->replacementCounter_ = NATABLE_MAX_REFCOUNT;
//Keep track of tables accessed during current statement
if((!cachedNATable->accessedInCurrentStatement()))
{
cachedNATable->setAccessedInCurrentStatement();
statementCachedTableList_.insert(cachedNATable);
}
}
//return NATable from cache
return cachedNATable;
}
// by default column histograms are marked to not be fetched,
// i.e. needHistogram_ is initialized to DONT_NEED_HIST.
// this method will mark columns for appropriate histograms depending on
// where they have been referenced in the query
void NATable::markColumnsForHistograms()
{
// Check if Show Query Stats command is being run
NABoolean runningShowQueryStatsCmd = CmpCommon::context()->showQueryStats();
// we want to get 1 key column that is not SYSKEY
NABoolean addSingleIntHist = FALSE;
if(colArray_.getColumn("SYSKEY"))
addSingleIntHist = TRUE;
// iterate over all the columns in the table
for(UInt32 i=0;i<colArray_.entries();i++)
{
// get a reference to the column
NAColumn * column = colArray_[i];
// is column part of a key
NABoolean isAKeyColumn = (column->isIndexKey() OR column->isPrimaryKey()
OR column->isPartitioningKey());
//check if this column requires histograms
if(column->isReferencedForHistogram() ||
(isAKeyColumn && isHbaseTable()))
column->setNeedFullHistogram();
else
// if column is:
// * a key
// OR
// * isReferenced but not for histogram and addSingleIntHist is true
if (isAKeyColumn ||
((runningShowQueryStatsCmd || addSingleIntHist) &&
column->isReferenced() && !column->isReferencedForHistogram()))
{
// if column is not a syskey
if (addSingleIntHist && (column->getColName() != "SYSKEY"))
addSingleIntHist = FALSE;
column->setNeedCompressedHistogram();
}
else
if (column->getType()->getVarLenHdrSize() &&
(CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) != DF_OFF ||
CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT_BMO) != DF_OFF ))
{
column->setNeedCompressedHistogram();
}
}
}
const QualifiedName& NATable::getFullyQualifiedGuardianName()
{
//qualified name and fileSetName are different
//so we use fileSetName because it will contain
//fully qualified guardian name
QualifiedName * fileName;
if(qualifiedName_.getQualifiedNameObj().getQualifiedNameAsString()
!= fileSetName_.getQualifiedNameAsString())
{
fileName = new(CmpCommon::statementHeap()) QualifiedName
(fileSetName_,CmpCommon::statementHeap());
}
else
{
fileName = new(CmpCommon::statementHeap()) QualifiedName
(qualifiedName_.getQualifiedNameObj(),CmpCommon::statementHeap());
}
return *fileName;
}
ExtendedQualName::SpecialTableType NATable::getTableType()
{
return qualifiedName_.getSpecialType();
}
NABoolean NATable::hasSaltedColumn(Lng32 * saltColPos)
{
for (CollIndex i=0; i<colArray_.entries(); i++ )
{
if ( colArray_[i]->isSaltColumn() )
{
if (saltColPos)
*saltColPos = i;
return TRUE;
}
}
return FALSE;
}
NABoolean NATable::hasDivisioningColumn(Lng32 * divColPos)
{
for (CollIndex i=0; i<colArray_.entries(); i++ )
{
if ( colArray_[i]->isDivisioningColumn() )
{
if (divColPos)
*divColPos = i;
return TRUE;
}
}
return FALSE;
}
// Get the part of the row size that is computable with info we have available
// without accessing HBase. The result is passed to estimateHBaseRowCount(), which
// completes the row size calculation with HBase info.
//
// A row stored in HBase consists of the following fields for each column:
// -----------------------------------------------------------------------
// | Key |Value | Row | Row |Column|Column|Column| Time | Key |Value |
// Field |Length|Length| Key | Key |Family|Family|Qualif| stamp| Type | |
// | | |Length| |Length| | | | | |
// -----------------------------------------------------------------------
// # Bytes 4 4 2 1 8 1
//
// The field lengths calculated here are for Row Key, Column Qualif, and Value.
// The size of the Value fields are not known to HBase, which treats cols as
// untyped, so we add up their lengths here, as well as the row key lengths,
// which are readily accessible via Traf metadata. The qualifiers, which represent
// the names of individual columns, are not the Trafodion column names, but
// minimal binary values that are mapped to the actual column names.
// The fixed size fields could also be added in here, but we defer that to the Java
// side so constants of the org.apache.hadoop.hbase.KeyValue class can be used.
// The single column family used by Trafodion is also a known entity, but we
// again do it in Java using the HBase client interface as insulation against
// possible future changes.
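// As a hypothetical illustration: a 3-column table whose columns have
// nominal sizes of 4, 12 and 12 bytes, with the 4-byte column as the only
// key column and all column positions <= 255, would yield
// 4 + 12 + 12 (values) + 3 (qualifiers) + 4 (row key) = 35 bytes here,
// before the fixed per-KeyValue overhead is added on the Java side.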
Int32 NATable::computeHBaseRowSizeFromMetaData() const
{
Int32 partialRowSize = 0;
Int32 rowKeySize = 0;
const NAColumnArray& keyCols = clusteringIndex_->getIndexKeyColumns();
CollIndex numKeyCols = keyCols.entries();
// For each column of the table, add the length of its value and the length of
// its name (HBase column qualifier). If a given column is part of the primary
// key, add the length of its value again, because it is part of the HBase row
// key.
for (Int32 colInx=0; colInx<colcount_; colInx++)
{
// Get length of the column qualifier and its data.
NAColumn* col = colArray_[colInx];
Lng32 colLen = col->getType()->getNominalSize(); // data length
Lng32 colPos = col->getPosition(); // position in table
partialRowSize += colLen;
// The qualifier is not the actual column name, but a binary value
// representing the ordinal position of the col in the table.
// Single byte is used if possible.
partialRowSize++;
if (colPos > 255)
partialRowSize++;
// Add col length again if a primary key column, because it will be part
// of the row key.
NABoolean found = FALSE;
for (CollIndex keyColInx=0; keyColInx<numKeyCols && !found; keyColInx++)
{
if (colPos == keyCols[keyColInx]->getPosition())
{
rowKeySize += colLen;
found = TRUE;
}
}
}
partialRowSize += rowKeySize;
return partialRowSize;
}
// For an HBase table, we can estimate the number of rows by dividing the number
// of KeyValues in all HFiles of the table by the number of columns (with a few
// other considerations).
Int64 NATable::estimateHBaseRowCount() const
{
Int64 estRowCount = 0;
ExpHbaseInterface* ehi = getHBaseInterface();
if (ehi)
{
HbaseStr fqTblName;
NAString tblName = getTableName().getQualifiedNameAsString();
fqTblName.len = tblName.length();
fqTblName.val = new(STMTHEAP) char[fqTblName.len+1];
strncpy(fqTblName.val, tblName.data(), fqTblName.len);
fqTblName.val[fqTblName.len] = '\0';
Int32 partialRowSize = computeHBaseRowSizeFromMetaData();
Lng32 retcode = ehi->estimateRowCount(fqTblName,
partialRowSize,
colcount_,
estRowCount);
NADELETEBASIC(fqTblName.val, STMTHEAP);
// Return 0 as the row count if an error occurred while estimating it.
// The estimate could also be 0 if there is less than 1MB of storage
// dedicated to the table -- no HFiles, and < 1MB in MemStore, for which
// size is reported only in megabytes.
if (retcode < 0)
estRowCount = 0;
delete ehi;
}
return estRowCount;
}
// Method to get an HBase interface for this table; returns NULL when
// HBase access does not apply (non-HBase tables, metadata tables,
// histogram tables, virtual tables).
ExpHbaseInterface* NATable::getHBaseInterface() const
{
if (!isHbaseTable() || isSeabaseMDTable() ||
getExtendedQualName().getQualifiedNameObj().getObjectName() == HBASE_HISTINT_NAME ||
getExtendedQualName().getQualifiedNameObj().getObjectName() == HBASE_HIST_NAME ||
getSpecialType() == ExtendedQualName::VIRTUAL_TABLE)
return NULL;
return NATable::getHBaseInterfaceRaw();
}
ExpHbaseInterface* NATable::getHBaseInterfaceRaw()
{
NADefaults* defs = &ActiveSchemaDB()->getDefaults();
const char* server = defs->getValue(HBASE_SERVER);
const char* zkPort = defs->getValue(HBASE_ZOOKEEPER_PORT);
ExpHbaseInterface* ehi = ExpHbaseInterface::newInstance
(STMTHEAP, server, zkPort);
Lng32 retcode = ehi->init(NULL);
if (retcode < 0)
{
*CmpCommon::diags()
<< DgSqlCode(-8448)
<< DgString0((char*)"ExpHbaseInterface::init()")
<< DgString1(getHbaseErrStr(-retcode))
<< DgInt0(-retcode)
<< DgString2((char*)GetCliGlobals()->getJniErrorStr().data());
delete ehi;
return NULL;
}
return ehi;
}
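// Return the begin keys of all HBase regions of the given table, or NULL
// if an HBase interface could not be obtained.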
NAArray<HbaseStr> *NATable::getRegionsBeginKey(const char* hbaseName)
{
ExpHbaseInterface* ehi = getHBaseInterfaceRaw();
NAArray<HbaseStr> *keyArray = NULL;
if (!ehi)
return NULL;
else
{
keyArray = ehi->getRegionBeginKeys(hbaseName);
delete ehi;
}
return keyArray;
}
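// Method to get the node names of the region servers hosting this table's
// partitions.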
NABoolean NATable::getRegionsNodeName(Int32 partns, ARRAY(const char *)& nodeNames ) const
{
ExpHbaseInterface* ehi = getHBaseInterface();
if (!ehi)
return FALSE;
else
{
HbaseStr fqTblName;
CorrName corrName(getTableName());
NAString tblName = (corrName.isHbaseCell() || corrName.isHbaseRow()) ?
corrName.getQualifiedNameObj().getObjectName()
:
getTableName().getQualifiedNameAsString();
fqTblName.len = tblName.length();
fqTblName.val = new(STMTHEAP) char[fqTblName.len+1];
strncpy(fqTblName.val, tblName.data(), fqTblName.len);
fqTblName.val[fqTblName.len] = '\0';
Lng32 retcode = ehi->getRegionsNodeName(fqTblName, partns, nodeNames);
NADELETEBASIC(fqTblName.val, STMTHEAP);
delete ehi;
if (retcode < 0)
return FALSE;
}
return TRUE;
}
// Method to get hbase table index levels and block size
NABoolean NATable::getHbaseTableInfo(Int32& hbtIndexLevels, Int32& hbtBlockSize) const
{
ExpHbaseInterface* ehi = getHBaseInterface();
if (!ehi)
return FALSE;
else
{
HbaseStr fqTblName;
NAString tblName = getTableName().getQualifiedNameAsString();
fqTblName.len = tblName.length();
fqTblName.val = new(STMTHEAP) char[fqTblName.len+1];
strncpy(fqTblName.val, tblName.data(), fqTblName.len);
fqTblName.val[fqTblName.len] = '\0';
Lng32 retcode = ehi->getHbaseTableInfo(fqTblName,
hbtIndexLevels,
hbtBlockSize);
NADELETEBASIC(fqTblName.val, STMTHEAP);
delete ehi;
if (retcode < 0)
return FALSE;
}
return TRUE;
}
// get details of this NATable cache entry
void NATableDB::getEntryDetails(
Int32 ii, // (IN) : NATable cache iterator entry
NATableEntryDetails &details) // (OUT): cache entry's details
{
Int32 NumEnt = cachedTableList_.entries();
if ( ( NumEnt == 0 ) || ( NumEnt <= ii ) )
{
memset(&details, 0, sizeof(details));
}
else {
NATable * object = cachedTableList_[ii];
QualifiedName QNO = object->qualifiedName_.getQualifiedNameObj();
Int32 partLen = QNO.getCatalogName().length();
strncpy(details.catalog, (char *)(QNO.getCatalogName().data()), partLen );
details.catalog[partLen] = '\0';
partLen = QNO.getSchemaName().length();
strncpy(details.schema, (char *)(QNO.getSchemaName().data()), partLen );
details.schema[partLen] = '\0';
partLen = QNO.getObjectName().length();
strncpy(details.object, (char *)(QNO.getObjectName().data()), partLen );
details.object[partLen] = '\0';
details.size = object->sizeInCache_;
}
}
NABoolean NATableDB::isHiveTable(CorrName& corrName)
{
return corrName.isHive();
}
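// Check whether the name refers to one of the internal virtual tables used
// by EXPLAIN, DESCRIBE and related utility commands; these bypass the
// Trafodion and Hive metadata paths in NATableDB::get() below.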
NABoolean NATableDB::isSQUtiDisplayExplain(CorrName& corrName)
{
const char* tblName = corrName.getQualifiedNameObj().getObjectName();
if ( !strcmp(tblName, "EXE_UTIL_DISPLAY_EXPLAIN__"))
return TRUE;
if ( !strcmp(tblName, "EXPLAIN__"))
return TRUE;
if ( !strcmp(tblName, "HIVEMD__"))
return TRUE;
if ( !strcmp(tblName, "DESCRIBE__"))
return TRUE;
if ( !strcmp(tblName, "EXE_UTIL_EXPR__"))
return TRUE;
if ( !strcmp(tblName, "STATISTICS__"))
return TRUE;
return FALSE;
}
NABoolean NATableDB::isSQInternalStoredProcedure(CorrName& corrName)
{
const char* tblName = corrName.getQualifiedNameObj().getObjectName();
if ( !strncmp(tblName, "SPTableOutQUERYCACHEENTRIES",
strlen("SPTableOutQUERYCACHEENTRIES")))
return TRUE;
if ( !strncmp(tblName, "SPTableOutQUERYCACHEDELETE",
strlen("SPTableOutQUERYCACHEDELETE")))
return TRUE;
if ( !strncmp(tblName, "SPTableOutQUERYCACHE",
strlen("SPTableOutQUERYCACHE")))
return TRUE;
if ( !strncmp(tblName, "SPTableOutHYBRIDQUERYCACHEENTRIES",
strlen("SPTableOutHYBRIDQUERYCACHEENTRIES")))
return TRUE;
if ( !strncmp(tblName, "SPTableOutHYBRIDQUERYCACHE",
strlen("SPTableOutHYBRIDQUERYCACHE")))
return TRUE;
return FALSE;
}
NABoolean NATableDB::isSQUmdTable(CorrName& corrName)
{
return FALSE;
}
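// Main entry point to obtain an NATable for a correlation name: return a
// valid cached object if one exists, otherwise construct a new NATable
// from Trafodion/HBase or Hive metadata (or from a supplied descriptor),
// insert it into the cache if it is cacheable, and set it up for the
// current statement.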
NATable * NATableDB::get(CorrName& corrName, BindWA * bindWA,
desc_struct *inTableDescStruct){
//check cache to see if a cached NATable object exists
NATable *table = get(&corrName.getExtendedQualNameObj(), bindWA);
if (table && (corrName.isHbase() || corrName.isSeabase()) && inTableDescStruct)
{
remove(table->getKey());
table = NULL;
}
if (table && ((table->isHbaseTable() || table->isSeabaseTable()) && !(table->isSeabaseMDTable())))
{
if ((CmpCommon::getDefault(TRAF_RELOAD_NATABLE_CACHE) == DF_ON))
{
remove(table->getKey());
table = NULL;
}
}
if (table && (corrName.isHbaseCell() || corrName.isHbaseRow()))
{
if (NOT HbaseAccess::validateVirtualTableDesc(table))
{
remove(table->getKey());
table = NULL;
}
}
// for caching statistics
if ((cacheMetaData_ && useCache_) && corrName.isCacheable())
{
//One lookup counted
++totalLookupsCount_;
if (table) ++totalCacheHits_; //Cache hit counted
}
NABoolean isMV = (table && table->isAnMV());
if (NOT table ||
(NOT isMV && table->getSpecialType() != corrName.getSpecialType())) {
// in open source, only the SEABASE catalog is allowed.
// Return an error if some other catalog is being used.
if ((NOT corrName.isHbase()) &&
(NOT corrName.isSeabase()) &&
(NOT corrName.isHive()) &&
(corrName.getSpecialType() != ExtendedQualName::VIRTUAL_TABLE))
{
*CmpCommon::diags()
<< DgSqlCode(-1002)
<< DgCatalogName(corrName.getQualifiedNameObj().getCatalogName())
<< DgString0("");
bindWA->setErrStatus();
return NULL;
}
// If this is a 'special' table, generate a table descriptor for it.
//
if (NOT inTableDescStruct && corrName.isSpecialTable())
inTableDescStruct = generateSpecialDesc(corrName);
//Heap used by the NATable object
NAMemory * naTableHeap = CmpCommon::statementHeap();
size_t allocSizeBefore = 0;
//if NATable caching is on check if this table is not already
//in the NATable cache. If it is in the cache create this NATable
//on the statement heap, since the cache can only store one value per
//key, therefore all duplicates (two or more different NATable objects
//that have the same key) are deleted at the end of the statement.
//ALSO
//We don't cache any special tables across statements. Please check
//the class ExtendedQualName for method isSpecialTable to see what
//are special tables
if (((NOT table) && cacheMetaData_ && useCache_) &&
corrName.isCacheable()){
naTableHeap = getHeap();
allocSizeBefore = naTableHeap->getAllocSize();
}
//if table is in cache tableInCache will be non-NULL
//otherwise it is NULL.
NATable * tableInCache = table;
if ((corrName.isHbase() || corrName.isSeabase()) &&
(!isSQUmdTable(corrName)) &&
(!isSQUtiDisplayExplain(corrName)) &&
(!isSQInternalStoredProcedure(corrName))
) {
// ------------------------------------------------------------------
// Create an NATable object for a Trafodion/HBase table
// ------------------------------------------------------------------
CmpSeabaseDDL cmpSBD((NAHeap *)CmpCommon::statementHeap());
desc_struct *tableDesc = NULL;
NABoolean isSeabase = FALSE;
NABoolean isSeabaseMD = FALSE;
NABoolean isUserUpdatableSeabaseMD = FALSE;
NABoolean isHbaseCell = corrName.isHbaseCell();
NABoolean isHbaseRow = corrName.isHbaseRow();
if (isHbaseCell || isHbaseRow)// explicit cell or row format specification
{
const char* extHBaseName = corrName.getQualifiedNameObj().getObjectName();
if (cmpSBD.existsInHbase(extHBaseName) != 1)
{
*CmpCommon::diags()
<< DgSqlCode(-1389)
<< DgString0(corrName.getQualifiedNameObj().getObjectName());
bindWA->setErrStatus();
return NULL;
}
NAArray<HbaseStr> *keyArray = NATable::getRegionsBeginKey(extHBaseName);
tableDesc =
HbaseAccess::createVirtualTableDesc
(corrName.getExposedNameAsAnsiString(FALSE, TRUE).data(),
isHbaseRow, isHbaseCell, keyArray);
deleteNAArray(STMTHEAP, keyArray);
isSeabase = FALSE;
}
else if (corrName.isSeabaseMD())
{
if (corrName.isSpecialTable() && corrName.getSpecialType() == ExtendedQualName::INDEX_TABLE)
{
tableDesc =
cmpSBD.getSeabaseTableDesc(
corrName.getQualifiedNameObj().getCatalogName(),
corrName.getQualifiedNameObj().getSchemaName(),
corrName.getQualifiedNameObj().getObjectName(),
COM_INDEX_OBJECT);
}
else
{
tableDesc =
cmpSBD.getSeabaseTableDesc(
corrName.getQualifiedNameObj().getCatalogName(),
corrName.getQualifiedNameObj().getSchemaName(),
corrName.getQualifiedNameObj().getObjectName(),
COM_BASE_TABLE_OBJECT);
if (tableDesc)
{
if (cmpSBD.isUserUpdatableSeabaseMD(
corrName.getQualifiedNameObj().getCatalogName(),
corrName.getQualifiedNameObj().getSchemaName(),
corrName.getQualifiedNameObj().getObjectName()))
isUserUpdatableSeabaseMD = TRUE;
}
}
isSeabase = TRUE;
isSeabaseMD = TRUE;
}
else if (! inTableDescStruct)
{
ComObjectType objectType = COM_BASE_TABLE_OBJECT;
isSeabase = TRUE;
if (corrName.isSpecialTable())
{
switch (corrName.getSpecialType())
{
case ExtendedQualName::INDEX_TABLE:
{
objectType = COM_INDEX_OBJECT;
break;
}
case ExtendedQualName::SG_TABLE:
{
objectType = COM_SEQUENCE_GENERATOR_OBJECT;
isSeabase = FALSE;
break;
}
case ExtendedQualName::LIBRARY_TABLE:
{
objectType = COM_LIBRARY_OBJECT;
isSeabase = FALSE;
break;
}
default: //TODO: No SpecialTableType for UDFs/Routines/COM_USER_DEFINED_ROUTINE_OBJECT
{
objectType = COM_BASE_TABLE_OBJECT;
}
}
}
tableDesc = cmpSBD.getSeabaseTableDesc(
corrName.getQualifiedNameObj().getCatalogName(),
corrName.getQualifiedNameObj().getSchemaName(),
corrName.getQualifiedNameObj().getObjectName(),
objectType);
}
if (inTableDescStruct)
tableDesc = inTableDescStruct;
if (tableDesc)
table = new (naTableHeap)
NATable(bindWA, corrName, naTableHeap, tableDesc);
if (!tableDesc || !table || bindWA->errStatus())
{
if (isSeabase)
*CmpCommon::diags()
<< DgSqlCode(-4082)
<< DgTableName(corrName.getExposedNameAsAnsiString());
else
*CmpCommon::diags()
<< DgSqlCode(-1389)
<< DgString0(corrName.getExposedNameAsAnsiString());
bindWA->setErrStatus();
return NULL;
}
table->setIsHbaseCellTable(isHbaseCell);
table->setIsHbaseRowTable(isHbaseRow);
table->setIsSeabaseTable(isSeabase);
table->setIsSeabaseMDTable(isSeabaseMD);
table->setIsUserUpdatableSeabaseMDTable(isUserUpdatableSeabaseMD);
}
else if (isHiveTable(corrName) &&
(!isSQUmdTable(corrName)) &&
(!isSQUtiDisplayExplain(corrName)) &&
(!corrName.isSpecialTable()) &&
(!isSQInternalStoredProcedure(corrName))
) {
// ------------------------------------------------------------------
// Create an NATable object for a Hive table
// ------------------------------------------------------------------
if ( hiveMetaDB_ == NULL ) {
if (CmpCommon::getDefault(HIVE_USE_FAKE_TABLE_DESC) != DF_ON)
{
hiveMetaDB_ = new (CmpCommon::contextHeap()) HiveMetaData();
if ( !hiveMetaDB_->init() ) {
*CmpCommon::diags() << DgSqlCode(-1190)
<< DgString0(hiveMetaDB_->getErrMethodName())
<< DgString1(hiveMetaDB_->getErrCodeStr())
<< DgString2(hiveMetaDB_->getErrDetail())
<< DgInt0(hiveMetaDB_->getErrCode());
bindWA->setErrStatus();
NADELETEBASIC(hiveMetaDB_, CmpCommon::contextHeap());
hiveMetaDB_ = NULL;
return NULL;
}
}
else
hiveMetaDB_ = new (CmpCommon::contextHeap())
HiveMetaData(); // fake metadata
}
// this default schema name is what the Hive default schema is called in SeaHive
NAString defSchema = ActiveSchemaDB()->getDefaults().getValue(HIVE_DEFAULT_SCHEMA);
defSchema.toUpper();
struct hive_tbl_desc* htbl;
NAString tableNameInt = corrName.getQualifiedNameObj().getObjectName();
NAString schemaNameInt = corrName.getQualifiedNameObj().getSchemaName();
if (corrName.getQualifiedNameObj().getUnqualifiedSchemaNameAsAnsiString() == defSchema)
schemaNameInt = hiveMetaDB_->getDefaultSchemaName();
// Hive stores names in lower case
// Right now, just downshift, could check for mixed case delimited
// identifiers at a later point, or wait until Hive supports delimited identifiers
schemaNameInt.toLower();
tableNameInt.toLower();
if (CmpCommon::getDefault(HIVE_USE_FAKE_TABLE_DESC) == DF_ON)
htbl = hiveMetaDB_->getFakedTableDesc(tableNameInt);
else
htbl = hiveMetaDB_->getTableDesc(schemaNameInt, tableNameInt);
if ( htbl )
{
table = new (naTableHeap) NATable(bindWA, corrName, naTableHeap, htbl);
}
else
{
if ((hiveMetaDB_->getErrCode() == 0)||
(hiveMetaDB_->getErrCode() == 100))
{
*CmpCommon::diags()
<< DgSqlCode(-1388)
<< DgTableName(corrName.getExposedNameAsAnsiString());
}
else
{
*CmpCommon::diags()
<< DgSqlCode(-1192)
<< DgString0(hiveMetaDB_->getErrMethodName())
<< DgString1(hiveMetaDB_->getErrCodeStr())
<< DgString2(hiveMetaDB_->getErrDetail())
<< DgInt0(hiveMetaDB_->getErrCode());
hiveMetaDB_->resetErrorInfo();
}
bindWA->setErrStatus();
return NULL;
}
} else
// ------------------------------------------------------------------
// Neither Trafodion nor Hive (probably dead code below)
// ------------------------------------------------------------------
table = new (naTableHeap)
NATable(bindWA, corrName, naTableHeap, inTableDescStruct);
CMPASSERT(table);
//if there was a problem in creating the NATable object
if (NOT ((table->getExtendedQualName().isSpecialTable()) &&
(table->getExtendedQualName().getSpecialType() ==
ExtendedQualName::SG_TABLE)) &&
(table->getColumnCount() == 0)) {
bindWA->setErrStatus();
return NULL;
}
// Special tables are not added to the statement table list.
// Index tables are added to the statement table list
if( (NOT table->getExtendedQualName().isSpecialTable()) ||
(table->getExtendedQualName().getSpecialType() ==
ExtendedQualName::INDEX_TABLE) ||
(table->getExtendedQualName().getSpecialType() ==
ExtendedQualName::MV_TABLE) ||
(table->getExtendedQualName().getSpecialType() ==
ExtendedQualName::GHOST_MV_TABLE) ||
(table->getExtendedQualName().getSpecialType() ==
ExtendedQualName::GHOST_INDEX_TABLE)
)
statementTableList_.insert(table);
//if there was no entry in cache associated with this key then
//insert it into cache.
//if there is already a value associated with this in the cache
//then don't insert into cache.
//This might happen e.g. if we call this method twice for the same table
//in the same statement.
if(!tableInCache){
//insert into cache
insert(table);
//if we are using the cache
//if this NATable object is cacheable
if((useCache_) &&
(corrName.isCacheable()))
{
//insert into list of all cached tables;
cachedTableList_.insert(table);
//insert into list of cached tables accessed
//during this statement
statementCachedTableList_.insert(table);
//if metadata caching is ON then adjust the size of the cache
//since we are adding an entry to the cache
if(cacheMetaData_)
{
currentCacheSize_ = heap_->getAllocSize();
table->sizeInCache_ = currentCacheSize_ - allocSizeBefore;
}
//update the high watermark for caching statistics
if (currentCacheSize_ > highWatermarkCache_)
highWatermarkCache_ = currentCacheSize_;
//
// the CompilerTrackingInfo highWaterMark gets reset on each
// tracking interval so it is tracked independently
if (currentCacheSize_ > intervalWaterMark_)
intervalWaterMark_ = currentCacheSize_;
//if we are caching metadata and previously the cache was
//empty set this flag to TRUE to indicate that there is
//something in the cache
if(!metaDataCached_ && cacheMetaData_)
metaDataCached_ = TRUE;
//enforce the cache memory constraints
if(!enforceMemorySpaceConstraints())
{
//was not able to get cache size below
//max allowed cache size
#ifndef NDEBUG
CMPASSERT(FALSE);
#endif
}
}
else{
//this has to be on the context heap since we need
//it after the statement heap has been removed
ExtendedQualName * nonCacheableTableName = new(CmpCommon::contextHeap())
ExtendedQualName(corrName.getExtendedQualNameObj(),
CmpCommon::contextHeap());
//insert into list of names of special tables
nonCacheableTableList_.insert(nonCacheableTableName);
// insert into list of non cacheable table idents. This
// allows the idents to be removed after the statement so
// the context heap doesn't keep growing.
const LIST(CollIndex) & tableIdList = table->getTableIdList();
for(CollIndex i = 0; i < tableIdList.entries(); i++)
{
nonCacheableTableIdents_.insert(tableIdList[i]);
}
}
}
}
//setup this NATable object for use in current statement
//if this object has already been setup earlier in the
//statement then this method will just return without doing
//anything
if(table) {
table->setupForStatement();
}
return table;
}
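// Remove all cached NATable entries whose qualified name matches the given
// name (ignoring partition clauses) and, when the scope requires it,
// broadcast query invalidation keys so that other users' caches are
// cleared as well.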
void NATableDB::removeNATable2(CorrName &corrName, ComQiScope qiScope,
ComObjectType ot)
{
const ExtendedQualName* toRemove = &(corrName.getExtendedQualNameObj());
NAHashDictionaryIterator<ExtendedQualName,NATable> iter(*this);
ExtendedQualName *key = NULL;
NATable *cachedNATable = NULL;
NASet<Int64> objectUIDs(CmpCommon::statementHeap(), 1);
// iterate over all entries and remove the ones that match the name
// ignoring any partition clauses and other additional info
iter.getNext(key,cachedNATable);
while(key)
{
if (key->getQualifiedNameObj() == toRemove->getQualifiedNameObj())
{
//remove from list of cached NATables
if (cachedTableList_.remove(cachedNATable) > 0)
{
// LCOV_EXCL_START - caching is off by default for now
//if metadata caching is ON, then adjust cache size
//since we are deleting a caching entry
if(cacheMetaData_)
currentCacheSize_ = heap_->getAllocSize();
if (cachedNATable->heap_ &&
cachedNATable->heap_ != CmpCommon::statementHeap())
tablesToDeleteAfterStatement_.insert(cachedNATable);
// LCOV_EXCL_STOP
}
else
{
// this must have been a non-cacheable table
const LIST(CollIndex) & tableIdList = cachedNATable->getTableIdList();
for(CollIndex i = 0; i < tableIdList.entries(); i++)
{
nonCacheableTableIdents_.remove(tableIdList[i]);
}
for (CollIndex i=0; i<nonCacheableTableList_.entries(); i++)
{
if (*(nonCacheableTableList_[i]) == *key)
{
nonCacheableTableList_.removeAt(i);
i--;
}
}
}
//remove pointer to NATable from cache
remove(key);
objectUIDs.insert(cachedNATable->objectUid().castToInt64());
statementCachedTableList_.remove(cachedNATable);
statementTableList_.remove(cachedNATable);
}
iter.getNext(key,cachedNATable);
}
// clear out the other users' caches too.
if (qiScope == REMOVE_FROM_ALL_USERS)
{
// There are some scenarios where the affected object
// does not have an NATable cache entry. Need to get one and
// add its objectUID to the set.
if (0 == objectUIDs.entries())
{
Int64 ouid = lookupObjectUidByName(
toRemove->getQualifiedNameObj(),
ot,
FALSE);
if (ouid > 0)
objectUIDs.insert(ouid);
}
Int32 numKeys = objectUIDs.entries();
if (numKeys > 0)
{
SQL_QIKEY qiKeys[numKeys];
for (CollIndex i = 0; i < numKeys; i++)
{
qiKeys[i].ddlObjectUID = objectUIDs[i];
qiKeys[i].operation[0] = 'O';
qiKeys[i].operation[1] = 'R';
}
long retcode = SQL_EXEC_SetSecInvalidKeys(numKeys, qiKeys);
}
}
}
void NATableDB::removeNATable(CorrName &corrName, ComQiScope qiScope,
ComObjectType ot,
NABoolean ddlXns, NABoolean atCommit)
{
// if ddl xns are being used, add this name to ddlObjsList and
// invalidate NATable in my environment. This will allow subsequent
// operations running in my environment under my current transaction
// to access the latest definition.
//
// NATable removal for other users will happen at xn commit/rollback time.
//
// If atCommit is set, then this is being called at commit time.
// In that case, do NATable removal processing for all users
// instead of adding to ddlObjsList.
//
// If ddl xns are not being used, then invalidate NATable cache for
// all users.
if ((ddlXns) &&
(NOT atCommit))
{
CmpContext::DDLObjInfo ddlObj;
ddlObj.ddlObjName = corrName.getQualifiedNameAsString();
ddlObj.qiScope = qiScope;
ddlObj.ot = ot;
ddlObj.objUID = -1;
NABoolean found = FALSE;
for (Lng32 i = 0;
((NOT found) && (i < CmpCommon::context()->ddlObjsList().entries()));
i++)
{
CmpContext::DDLObjInfo &ddlObjInList =
CmpCommon::context()->ddlObjsList()[i];
if (ddlObj.ddlObjName == ddlObjInList.ddlObjName)
found = TRUE;
}
removeNATable2(corrName, qiScope, ot); //ComQiScope::REMOVE_MINE_ONLY, ot);
if (NOT found)
CmpCommon::context()->ddlObjsList().insert(ddlObj);
return;
}
removeNATable2(corrName, qiScope, ot);
}
//This method is called at the end of each statement to reset statement
//specific stuff in the NATable objects in the cache.
void NATableDB::resetAfterStatement(){
//Variable used for iteration in loops below
CollIndex i = 0;
//Variable used to point to a table's heap. Only delete the heap if it is
// neither the context nor the statement heap (i.e., allocated from the
// C++ system heap). The CmpContext heap is deleted in
// in ContextCli::deleteMe().
// The statement heap is deleted in the destructor of class CmpStatement.
NAMemory * tableHeap = NULL;
//if metadata caching (i.e. NATable caching) is not on then just
//flush the cache. Since it might be that there are still some
//tables in the cache.
if (!cacheMetaData_){
flushCache();
}
else{
//if caching is ON then reset all cached NATables used during statement
//if this was a DDL statement, delete all NATables that participated in the
//statement
for (i=0; i < statementCachedTableList_.entries(); i++)
{
if(statementCachedTableList_[i])
{
//check whether the statement was a DDL statement; if so, delete
//all the tables used in the statement, since the DDL affected
//the tables and they should be reconstructed for whatever
//statement follows.
if((!useCache_)||
(statementCachedTableList_[i]->isAnMV())||
(statementCachedTableList_[i]->isAnMVMetaData())||
(statementCachedTableList_[i]->isAnMPTableWithAnsiName())||
(statementCachedTableList_[i]->constructionHadWarnings()) ||
(statementCachedTableList_[i]->getClearHDFSStatsAfterStmt())){
//remove from list of cached Tables
cachedTableList_.remove(statementCachedTableList_[i]);
//remove from the cache itself
remove(statementCachedTableList_[i]->getKey());
if ( statementCachedTableList_[i]->getHeapType() == NATable::OTHER ) {
delete statementCachedTableList_[i];
currentCacheSize_ = heap_->getAllocSize();
}
}
else{
statementCachedTableList_[i]->resetAfterStatement();
}
}
}
nonCacheableTableIdents_.clear();
//remove references to nonCacheable tables from cache
//and delete the name
for(i=0; i < nonCacheableTableList_.entries(); i++){
remove(nonCacheableTableList_[i]);
delete nonCacheableTableList_[i]; // delete the name only
}
//clear the list of special tables
nonCacheableTableList_.clear();
}
//delete tables that were not deleted earlier to
//save compile-time performance. Since the heaps
//deleted below are large 16KB+, it takes time
//to delete them. The time to delete these heaps
//at this point is not 'visible' in the compile-
//time since the statement has been compiled and
//sent to the executor.
for(i=0; i < tablesToDeleteAfterStatement_.entries(); i++)
{
if ( tablesToDeleteAfterStatement_[i]->getHeapType() == NATable::OTHER ) {
delete tablesToDeleteAfterStatement_[i];
}
currentCacheSize_ = heap_->getAllocSize();
}
//clear the list of tables to delete after statement
tablesToDeleteAfterStatement_.clear();
//clear the list of tables used in the current statement
statementTableList_.clear();
//clear the list of cached tables used in the current statement
statementCachedTableList_.clear();
//reset various statement level flags
refreshCacheInThisStatement_=FALSE;
useCache_=FALSE;
}
//flush the cache if there is anything cached in it
//otherwise just destroy all the keys in the cache.
//If there is nothing cached, which could mean either
//of the following:
//1. NATable caching is off.
//2. All entries currently in cache where created on
// the statement heap, i.e. not persistent across
// statements.
//In such a case we don't need to delete any NATable
//objects (since they will be removed when the statement
//heap is deleted. We only need to delete the keys.
void NATableDB::flushCache()
{
//if something is cached
if(metaDataCached_){
//set the flag to indicate cache is clear
metaDataCached_ = FALSE;
//Destroy the keys in the cache, this also
//clears out the cache entries without deleting
//the cached NATable
clearAndDestroyKeysOnly();
//delete the tables that were cached by deleting each table's
//heap. Each cached table and all of its stuff are allocated
//on a separate heap (i.e. a heap per table). That seems to
//be the safest thing to do to avoid memory leaks.
for(CollIndex i=0; i < cachedTableList_.entries(); i++)
{
if(cachedTableList_[i])
{
delete cachedTableList_[i];
}
}
}
else{
//no metadata cached (i.e. metadata caching is off and there
//is no remaining metadata in the cache from when the caching
//was on). Just clear out the cache entries, of course we need
//to delete keys because the cache allocates keys on the context
//heap.
clearAndDestroyKeysOnly ();
}
//clear out the lists of tables in the cache
//1. list of tables in the cache used in this statement
//2. list of all tables in the cache
statementCachedTableList_.clear();
cachedTableList_.clear();
//set cache size to 0 to indicate nothing in cache
currentCacheSize_ = 0;
highWatermarkCache_ = 0; // High watermark of currentCacheSize_
totalLookupsCount_ = 0; // reset NATable entries lookup counter
totalCacheHits_ = 0; // reset cache hit counter
// per interval counters
intervalWaterMark_ = 0;
}
//check if cache size is with maximum allowed cache size.
//if cache size is above the maximum allowed cache size,
//then remove entries in the cache based on the cache
//replacement policy to get the cache size under the maximum
//allowed cache size.
NABoolean NATableDB::enforceMemorySpaceConstraints()
{
//check if cache size is within memory constraints
if (maxCacheSize_ == 0 || heap_->getAllocSize() <= maxCacheSize_)
return TRUE;
//need to get cache size under memory allowance
//if our cursor is pointing past the end of the
//list of cached entries, reset it to point to
//start of the list of cached entries.
if(replacementCursor_ >= (Int32) cachedTableList_.entries())
replacementCursor_ = 0;
//keep track of entry in the list of cached entries
//where we are starting from, since we start from
//where we left off the last time this method got
//called.
Int32 startingCursorPosition = replacementCursor_;
Int32 numLoops = 0; //number of loops around the list of cached objects
//this loop iterates over list of cached NATable objects.
//in each iteration it decrements the replacementCounter
//of a table.
//if a table with a replacementCounter value of zero is
//encountered, it is removed if it is not being used
//in the current statement.
//check if cache is now within memory constraints
while (heap_->getAllocSize() > maxCacheSize_){
//get reference to table
NATable * table = cachedTableList_[replacementCursor_];
    if(table)
    {
      //check if table has a zero replacementCounter_
      if(!table->replacementCounter_)
      {
        //if table is not being accessed in current statement then remove it
        if(!table->accessedInCurrentStatement_)
        {
          RemoveFromNATableCache( table , replacementCursor_ );
        }
      }
      else{
        table->replacementCounter_--;
      }
    }
replacementCursor_++;
    //if the replacement cursor ran off the end of the list of cached tables
    //reset it to the beginning of the list
if(replacementCursor_ >= (Int32) cachedTableList_.entries())
replacementCursor_ = 0;
//check if we completed one loop around all the cached entries
//if so, increment the loop count
if(replacementCursor_ == startingCursorPosition){
numLoops++;
}
    //if we did NATABLE_MAX_REFCOUNT loops around the list of cached objects
    //and still could not free up enough space, give up.
    //We check for NATABLE_MAX_REFCOUNT loops since the replacementCounter_
    //is capped at NATABLE_MAX_REFCOUNT.
if(numLoops==NATABLE_MAX_REFCOUNT)
return FALSE;
}
//return true indicating cache size is below maximum memory allowance.
return TRUE;
}
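//
// Illustrative sketch only -- not part of NATableDB. Assuming each cached
// entry carries an aging counter and an "in use" flag, the function below
// shows the same second-chance policy as above on plain std:: containers:
// walk a cursor around the entries, age the counter on each visit, and evict
// an entry only once its counter has reached zero and it is not in use.
// All names here (SketchEntry, evictUntilWithinBudget) are hypothetical.
//
#include <cstddef>
#include <vector>
struct SketchEntry
{
  int replacementCounter; // aged on each pass, like replacementCounter_
  bool inUse;             // like accessedInCurrentStatement_
  size_t size;            // pretend per-entry footprint in bytes
};
static bool evictUntilWithinBudget(std::vector<SketchEntry> &entries,
                                   size_t &cacheSize, size_t budget,
                                   size_t &cursor, size_t maxPasses)
{
  size_t steps = 0;
  const size_t maxSteps = entries.size() * maxPasses; // bound the walk
  while (cacheSize > budget && !entries.empty() && steps++ < maxSteps)
  {
    if (cursor >= entries.size())
      cursor = 0; // wrap the cursor around the list
    SketchEntry &e = entries[cursor];
    if (e.replacementCounter == 0 && !e.inUse)
    {
      // evict: reclaim its footprint; the next entry slides under the cursor
      cacheSize = (e.size >= cacheSize) ? 0 : cacheSize - e.size;
      entries.erase(entries.begin() + cursor);
    }
    else
    {
      if (e.replacementCounter > 0)
        e.replacementCounter--; // second chance: age the entry instead of evicting
      cursor++;
    }
  }
  return cacheSize <= budget; // false when the budget could not be met
}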
//Remove all the NATable objects from the cache that were used during
//the current statement.
//This is used when a binder error occurs. In rare cases the binder
//error might be due to a stale metadata cache entry.
// LCOV_EXCL_START :cnu
void NATableDB::flushCacheEntriesUsedInCurrentStatement(){
//do this only if metadata caching is 'ON'
if(cacheMetaData_)
{
for (CollIndex i=0; i < statementCachedTableList_.entries(); i++)
{
if(statementCachedTableList_[i])
{
//remove from list of cached Tables
cachedTableList_.remove(statementCachedTableList_[i]);
//remove from the cache itself
remove(statementCachedTableList_[i]->getKey());
//keep track of change in cache size
delete statementCachedTableList_[i];
currentCacheSize_ = heap_->getAllocSize();
}
}
//clear the list of tables used in the current statement
statementCachedTableList_.clear();
}
}
// LCOV_EXCL_STOP
//Turn metadata caching ON
void NATableDB::setCachingON()
{
resizeCache(getDefaultAsLong(METADATA_CACHE_SIZE)*1024*1024);
cacheMetaData_ = TRUE;
}
// Obtain a list of table identifiers for the current statement.
// Allocate the list on the heap passed.
const LIST(CollIndex) &
NATableDB::getStmtTableIdList(NAMemory *heap) const
{
LIST(CollIndex) *list = new (heap) LIST(CollIndex)(heap);
for(CollIndex i = 0; i < statementTableList_.entries(); i++)
{
NATable *table = statementTableList_[i];
list->insert(table->getTableIdList());
}
return *list;
}
// function to return number of entries in cachedTableList_ LIST.
Int32 NATableDB::end()
{
return cachedTableList_.entries() ;
}
void
NATableDB::free_entries_with_QI_key(Int32 numKeys, SQL_QIKEY* qiKeyArray)
{
UInt32 currIndx = 0;
// For each table in cache, see if it should be removed
while ( currIndx < cachedTableList_.entries() )
{
NATable * currTable = cachedTableList_[currIndx];
// Only need to remove seabase tables and external Hive/hbase tables
if (!currTable->isSeabaseTable() && !currTable->hasExternalTable())
{
currIndx++;
continue;
}
if (qiCheckForInvalidObject(numKeys, qiKeyArray,
currTable->objectUid().get_value(),
currTable->getSecKeySet()))
{
if ( currTable->accessedInCurrentStatement_ )
statementCachedTableList_.remove( currTable );
while ( statementTableList_.remove( currTable ) ) // Remove as many times as on list!
{ ; }
RemoveFromNATableCache( currTable , currIndx );
}
else currIndx++; //Increment if NOT found ... else currIndx already pointing at next entry!
}
}
//
// Remove a specified NATable entry from the NATable Cache
//
void
NATableDB::RemoveFromNATableCache( NATable * NATablep , UInt32 currIndx )
{
NAMemory * tableHeap = NATablep->heap_;
NABoolean InStatementHeap = (tableHeap == (NAMemory *)CmpCommon::statementHeap());
remove(NATablep->getKey());
cachedTableList_.removeAt( currIndx );
if ( ! InStatementHeap )
delete NATablep;
if ( ! InStatementHeap )
currentCacheSize_ = heap_->getAllocSize();
}
//
// Remove ALL entries from the NATable Cache that have been
// marked for removal before the next compilation.
//
void
NATableDB::remove_entries_marked_for_removal()
{
NATableDB * TableDB = ActiveSchemaDB()->getNATableDB() ;
UInt32 currIndx = 0;
while ( currIndx < TableDB->cachedTableList_.entries() )
{
NATable * NATablep = TableDB->cachedTableList_[ currIndx ] ;
NABoolean accInCurrStmt = NATablep->accessedInCurrentStatement() ;
if ( NATablep->isToBeRemovedFromCacheBNC() ) //to be removed by CmpMain Before Next Comp. retry?
{
TableDB->RemoveFromNATableCache( NATablep, currIndx );
if ( accInCurrStmt )
{
TableDB->statementCachedTableList_.remove( NATablep );
}
while ( TableDB->statementTableList_.remove( NATablep ) ) // Remove as many times as on list!
{ ; }
}
else currIndx++ ; //Note: No increment if the entry was removed !
}
}
//
// UNMARK all entries from the NATable Cache that have been
// marked for removal before the next compilation. We have
// decided to leave them in the NATable cache after all.
//
void
NATableDB::unmark_entries_marked_for_removal()
{
NATableDB * TableDB = ActiveSchemaDB()->getNATableDB() ;
UInt32 currIndx = 0;
while ( currIndx < TableDB->cachedTableList_.entries() )
{
NATable * NATablep = TableDB->cachedTableList_[ currIndx ] ;
if ( NATablep->isToBeRemovedFromCacheBNC() ) //to be removed by CmpMain Before Next Comp. retry?
{
NATablep->setRemoveFromCacheBNC(FALSE);
}
    else currIndx++ ; //Note: no increment when the flag was just cleared; that entry is re-examined and advanced on the next pass.
}
}
void NATableDB::getCacheStats(NATableCacheStats & stats)
{
memset(stats.contextType, ' ', sizeof(stats.contextType));
stats.numLookups = totalLookupsCount_;
stats.numCacheHits = totalCacheHits_;
stats.currentCacheSize = currentCacheSize_;
stats.highWaterMark = highWatermarkCache_;
stats.maxCacheSize = maxCacheSize_;
stats.numEntries = cachedTableList_.entries();
}
| 1 | 11,992 | Will this cause a reload _all_ NATable information? It's too bad we don't have some way to limit the reload to just the tables that have a metadata mismatch. | apache-trafodion | cpp |
@@ -111,6 +111,7 @@ var opDocList = []stringString{
{"app_global_del", "delete key A from a global state of the current application"},
{"asset_holding_get", "read from account specified by Txn.Accounts[A] and asset B holding field X (imm arg) => {0 or 1 (top), value}"},
{"asset_params_get", "read from asset Txn.ForeignAssets[A] params field X (imm arg) => {0 or 1 (top), value}"},
+ {"assert", "immediately fail unless value X is not zero"},
}
var opDocByName map[string]string | 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package logic
import (
"fmt"
"github.com/algorand/go-algorand/protocol"
)
type stringString struct {
a string
b string
}
func stringStringListToMap(they []stringString) map[string]string {
out := make(map[string]string, len(they))
for _, v := range they {
out[v.a] = v.b
}
return out
}
// short description of every op
var opDocList = []stringString{
{"err", "Error. Panic immediately. This is primarily a fencepost against accidental zero bytes getting compiled into programs."},
{"sha256", "SHA256 hash of value X, yields [32]byte"},
{"keccak256", "Keccak256 hash of value X, yields [32]byte"},
{"sha512_256", "SHA512_256 hash of value X, yields [32]byte"},
{"ed25519verify", "for (data A, signature B, pubkey C) verify the signature of (\"ProgData\" || program_hash || data) against the pubkey => {0 or 1}"},
{"+", "A plus B. Panic on overflow."},
{"-", "A minus B. Panic if B > A."},
{"/", "A divided by B. Panic if B == 0."},
{"*", "A times B. Panic on overflow."},
{"<", "A less than B => {0 or 1}"},
{">", "A greater than B => {0 or 1}"},
{"<=", "A less than or equal to B => {0 or 1}"},
{">=", "A greater than or equal to B => {0 or 1}"},
{"&&", "A is not zero and B is not zero => {0 or 1}"},
{"||", "A is not zero or B is not zero => {0 or 1}"},
{"==", "A is equal to B => {0 or 1}"},
{"!=", "A is not equal to B => {0 or 1}"},
{"!", "X == 0 yields 1; else 0"},
{"len", "yields length of byte value X"},
{"itob", "converts uint64 X to big endian bytes"},
{"btoi", "converts bytes X as big endian to uint64"},
{"%", "A modulo B. Panic if B == 0."},
{"|", "A bitwise-or B"},
{"&", "A bitwise-and B"},
{"^", "A bitwise-xor B"},
{"~", "bitwise invert value X"},
{"mulw", "A times B out to 128-bit long result as low (top) and high uint64 values on the stack"},
{"addw", "A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack"},
{"intcblock", "load block of uint64 constants"},
{"intc", "push value from uint64 constants to stack by index into constants"},
{"intc_0", "push constant 0 from intcblock to stack"},
{"intc_1", "push constant 1 from intcblock to stack"},
{"intc_2", "push constant 2 from intcblock to stack"},
{"intc_3", "push constant 3 from intcblock to stack"},
{"bytecblock", "load block of byte-array constants"},
{"bytec", "push bytes constant to stack by index into constants"},
{"bytec_0", "push constant 0 from bytecblock to stack"},
{"bytec_1", "push constant 1 from bytecblock to stack"},
{"bytec_2", "push constant 2 from bytecblock to stack"},
{"bytec_3", "push constant 3 from bytecblock to stack"},
{"arg", "push Args[N] value to stack by index"},
{"arg_0", "push Args[0] to stack"},
{"arg_1", "push Args[1] to stack"},
{"arg_2", "push Args[2] to stack"},
{"arg_3", "push Args[3] to stack"},
{"txn", "push field from current transaction to stack"},
{"gtxn", "push field to the stack from a transaction in the current transaction group"},
{"txna", "push value of an array field from current transaction to stack"},
{"gtxna", "push value of a field to the stack from a transaction in the current transaction group"},
{"global", "push value from globals to stack"},
{"load", "copy a value from scratch space to the stack"},
{"store", "pop a value from the stack and store to scratch space"},
{"bnz", "branch if value X is not zero"},
{"bz", "branch if value X is zero"},
{"b", "branch unconditionally to offset"},
{"return", "use last value on stack as success value; end"},
{"pop", "discard value X from stack"},
{"dup", "duplicate last value on stack"},
{"dup2", "duplicate two last values on stack: A, B -> A, B, A, B"},
{"concat", "pop two byte strings A and B and join them, push the result"},
{"substring", "pop a byte string X. For immediate values in 0..255 M and N: extract a range of bytes from it starting at M up to but not including N, push the substring result. If N < M, or either is larger than the string length, the program fails"},
{"substring3", "pop a byte string A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the string length, the program fails"},
{"balance", "get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender"},
{"app_opted_in", "check if account specified by Txn.Accounts[A] opted in for the application B => {0 or 1}"},
{"app_local_get", "read from account specified by Txn.Accounts[A] from local state of the current application key B => value"},
{"app_local_get_ex", "read from account specified by Txn.Accounts[A] from local state of the application B key C => {0 or 1 (top), value}"},
{"app_global_get", "read key A from global state of a current application => value"},
{"app_global_get_ex", "read from application Txn.ForeignApps[A] global state key B => {0 or 1 (top), value}. A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app"},
{"app_local_put", "write to account specified by Txn.Accounts[A] to local state of a current application key B with value C"},
{"app_global_put", "write key A and value B to global state of the current application"},
{"app_local_del", "delete from account specified by Txn.Accounts[A] local state key B of the current application"},
{"app_global_del", "delete key A from a global state of the current application"},
{"asset_holding_get", "read from account specified by Txn.Accounts[A] and asset B holding field X (imm arg) => {0 or 1 (top), value}"},
{"asset_params_get", "read from asset Txn.ForeignAssets[A] params field X (imm arg) => {0 or 1 (top), value}"},
}
var opDocByName map[string]string
// OpDoc returns a description of the op
func OpDoc(opName string) string {
if opDocByName == nil {
opDocByName = stringStringListToMap(opDocList)
}
return opDocByName[opName]
}
// notes on immediate bytes following the opcode
var opcodeImmediateNoteList = []stringString{
{"intcblock", "{varuint length} [{varuint value}, ...]"},
{"intc", "{uint8 int constant index}"},
{"bytecblock", "{varuint length} [({varuint value length} bytes), ...]"},
{"bytec", "{uint8 byte constant index}"},
{"arg", "{uint8 arg index N}"},
{"txn", "{uint8 transaction field index}"},
{"gtxn", "{uint8 transaction group index}{uint8 transaction field index}"},
{"txna", "{uint8 transaction field index}{uint8 transaction field array index}"},
{"gtxna", "{uint8 transaction group index}{uint8 transaction field index}{uint8 transaction field array index}"},
{"global", "{uint8 global field index}"},
{"bnz", "{0..0x7fff forward branch offset, big endian}"},
{"bz", "{0..0x7fff forward branch offset, big endian}"},
{"b", "{0..0x7fff forward branch offset, big endian}"},
{"load", "{uint8 position in scratch space to load from}"},
{"store", "{uint8 position in scratch space to store to}"},
{"substring", "{uint8 start position}{uint8 end position}"},
{"asset_holding_get", "{uint8 asset holding field index}"},
{"asset_params_get", "{uint8 asset params field index}"},
}
var opcodeImmediateNotes map[string]string
// OpImmediateNote returns a short string about immediate data which follows the op byte
func OpImmediateNote(opName string) string {
if opcodeImmediateNotes == nil {
opcodeImmediateNotes = stringStringListToMap(opcodeImmediateNoteList)
}
return opcodeImmediateNotes[opName]
}
// further documentation on the function of the opcode
var opDocExtraList = []stringString{
{"ed25519verify", "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack."},
{"bnz", "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are currently limited to forward branches only, 0-0x7fff. A future expansion might make this a signed 16 bit integer allowing for backward branches and looping.\n\nAt LogicSigVersion 2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before LogicSigVersion 2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)"},
{"bz", "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`."},
{"b", "See `bnz` for details on how branches work. `b` always jumps to the offset."},
{"intcblock", "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script."},
{"bytecblock", "`bytecblock` loads the following program bytes into an array of byte string constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script."},
{"*", "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`."},
{"+", "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`."},
{"txn", "FirstValidTime causes the program to fail. The field is reserved for future use."},
{"gtxn", "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`."},
{"btoi", "`btoi` panics if the input is longer than 8 bytes."},
{"concat", "`concat` panics if the result would be greater than 4096 bytes."},
{"app_opted_in", "params: account index, application id (top of the stack on opcode entry). Return: 1 if opted in and 0 otherwise."},
{"app_local_get", "params: account index, state key. Return: value. The value is zero if the key does not exist."},
{"app_local_get_ex", "params: account index, application id, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value."},
{"app_global_get_ex", "params: application index, state key. Return: value. Application index is"},
{"app_global_get", "params: state key. Return: value. The value is zero if the key does not exist."},
{"app_local_put", "params: account index, state key, value."},
{"app_local_del", "params: account index, state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)"},
{"app_global_del", "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)"},
{"asset_holding_get", "params: account index, asset id. Return: did_exist flag (1 if exist and 0 otherwise), value."},
{"asset_params_get", "params: txn.ForeignAssets offset. Return: did_exist flag (1 if exist and 0 otherwise), value."},
}
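// Illustrative sketch only (not part of this file): per the `bnz` note above,
// the opcode at pc is followed by a big-endian 16 bit offset (high byte, then
// low byte), and a taken branch lands at pc + 3 + offset. The function name
// and the assumption that pc+2 is within the program are hypothetical.
func branchTargetSketch(program []byte, pc int) int {
	offset := int(program[pc+1])<<8 | int(program[pc+2]) // high byte first, then low byte
	return pc + 3 + offset                               // target of a taken branch
}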
var opDocExtras map[string]string
// OpDocExtra returns extra documentation text about an op
func OpDocExtra(opName string) string {
if opDocExtras == nil {
opDocExtras = stringStringListToMap(opDocExtraList)
}
return opDocExtras[opName]
}
// OpGroup is a grouping of ops for documentation purposes.
// e.g. "Arithmetic", ["+", "-", ...]
type OpGroup struct {
GroupName string
Ops []string
}
// OpGroupList is groupings of ops for documentation purposes.
var OpGroupList = []OpGroup{
{"Arithmetic", []string{"sha256", "keccak256", "sha512_256", "ed25519verify", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "concat", "substring", "substring3"}},
{"Loading Values", []string{"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "txn", "gtxn", "txna", "gtxna", "global", "load", "store"}},
{"Flow Control", []string{"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2"}},
{"State Access", []string{"balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get"}},
}
// OpCost returns the relative cost score for an op
func OpCost(opName string) int {
return opsByName[LogicVersion][opName].opSize.cost
}
// OpAllCosts returns an array of the relative cost score for an op by version.
// If all the costs are the same the array is single entry
// otherwise it has costs by op version
func OpAllCosts(opName string) []int {
cost := opsByName[LogicVersion][opName].opSize.cost
costs := make([]int, LogicVersion+1)
isDifferent := false
for v := 1; v <= LogicVersion; v++ {
costs[v] = opsByName[v][opName].opSize.cost
if costs[v] > 0 && costs[v] != cost {
isDifferent = true
}
}
if !isDifferent {
return []int{cost}
}
return costs
}
// OpSize returns the number of bytes for an op. 0 for variable.
func OpSize(opName string) int {
return opsByName[LogicVersion][opName].opSize.size
}
// see assembler.go TxnTypeNames
// also used to parse symbolic constants for `int`
var typeEnumDescriptions = []stringString{
{string(protocol.UnknownTx), "Unknown type. Invalid transaction"},
{string(protocol.PaymentTx), "Payment"},
{string(protocol.KeyRegistrationTx), "KeyRegistration"},
{string(protocol.AssetConfigTx), "AssetConfig"},
{string(protocol.AssetTransferTx), "AssetTransfer"},
{string(protocol.AssetFreezeTx), "AssetFreeze"},
{string(protocol.ApplicationCallTx), "ApplicationCall"},
}
// TypeNameDescription returns extra description about a low level protocol transaction Type string
func TypeNameDescription(typeName string) string {
for _, ted := range typeEnumDescriptions {
if typeName == ted.a {
return ted.b
}
}
return "invalid type name"
}
// see assembler.go TxnTypeNames
// also used to parse symbolic constants for `int`
var onCompletionDescriptions = map[OnCompletionConstType]string{
NoOp: "Only execute the `ApprovalProgram` associated with this application ID, with no additional effects.",
OptIn: "Before executing the `ApprovalProgram`, allocate local state for this application into the sender's account data.",
CloseOut: "After executing the `ApprovalProgram`, clear any local state for this application out of the sender's account data.",
ClearState: "Don't execute the `ApprovalProgram`, and instead execute the `ClearStateProgram` (which may not reject this transaction). Additionally, clear any local state for this application out of the sender's account data as in `CloseOutOC`.",
UpdateApplication: "After executing the `ApprovalProgram`, replace the `ApprovalProgram` and `ClearStateProgram` associated with this application ID with the programs specified in this transaction.",
DeleteApplication: "After executing the `ApprovalProgram`, delete the application parameters from the account data of the application's creator.",
}
// OnCompletionDescription returns extra description about OnCompletion constants
func OnCompletionDescription(value uint64) string {
desc, ok := onCompletionDescriptions[OnCompletionConstType(value)]
if ok {
return desc
}
return "invalid constant value"
}
// OnCompletionPreamble describes what the OnCompletion constants represent.
const OnCompletionPreamble = "An application transaction must indicate the action to be taken following the execution of its approvalProgram or clearStateProgram. The constants below describe the available actions."
var txnFieldDocList = []stringString{
{"Sender", "32 byte address"},
{"Fee", "micro-Algos"},
{"FirstValid", "round number"},
{"FirstValidTime", "Causes program to fail; reserved for future use"},
{"LastValid", "round number"},
{"Receiver", "32 byte address"},
{"Amount", "micro-Algos"},
{"CloseRemainderTo", "32 byte address"},
{"VotePK", "32 byte address"},
{"SelectionPK", "32 byte address"},
//{"VoteFirst", ""},
//{"VoteLast", ""},
{"TypeEnum", "See table below"},
{"XferAsset", "Asset ID"},
{"AssetAmount", "value in Asset's units"},
{"AssetSender", "32 byte address. Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset."},
{"AssetReceiver", "32 byte address"},
{"AssetCloseTo", "32 byte address"},
{"GroupIndex", "Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1"},
{"TxID", "The computed ID for this transaction. 32 bytes."},
{"ApplicationID", "ApplicationID from ApplicationCall transaction"},
{"OnCompletion", "ApplicationCall transaction on completion action"},
{"ApplicationArgs", "Arguments passed to the application in the ApplicationCall transaction"},
{"NumAppArgs", "Number of ApplicationArgs"},
{"Accounts", "Accounts listed in the ApplicationCall transaction"},
{"NumAccounts", "Number of Accounts"},
{"ApprovalProgram", "Approval program"},
{"ClearStateProgram", "Clear state program"},
{"RekeyTo", "32 byte Sender's new AuthAddr"},
{"ConfigAsset", "Asset ID in asset config transaction"},
{"ConfigAssetTotal", "Total number of units of this asset created"},
{"ConfigAssetDecimals", "Number of digits to display after the decimal place when displaying the asset"},
{"ConfigAssetDefaultFrozen", "Whether the asset's slots are frozen by default or not, 0 or 1"},
{"ConfigAssetUnitName", "Unit name of the asset"},
{"ConfigAssetName", "The asset name"},
{"ConfigAssetURL", "URL"},
{"ConfigAssetMetadataHash", "32 byte commitment to some unspecified asset metadata"},
{"ConfigAssetManager", "32 byte address"},
{"ConfigAssetReserve", "32 byte address"},
{"ConfigAssetFreeze", "32 byte address"},
{"ConfigAssetClawback", "32 byte address"},
{"FreezeAsset", "Asset ID being frozen or un-frozen"},
{"FreezeAssetAccount", "32 byte address of the account whose asset slot is being frozen or un-frozen"},
{"FreezeAssetFrozen", "The new frozen value, 0 or 1"},
}
// TxnFieldDocs are notes on fields available by `txn` and `gtxn`
var txnFieldDocs map[string]string
// TxnFieldDocs are notes on fields available by `txn` and `gtxn` with extra versioning info if any
func TxnFieldDocs() map[string]string {
return fieldsDocWithExtra(txnFieldDocs, txnFieldSpecByName)
}
var globalFieldDocList = []stringString{
{"MinTxnFee", "micro Algos"},
{"MinBalance", "micro Algos"},
{"MaxTxnLife", "rounds"},
{"ZeroAddress", "32 byte address of all zero bytes"},
{"GroupSize", "Number of transactions in this atomic transaction group. At least 1"},
{"LogicSigVersion", "Maximum supported TEAL version"},
{"Round", "Current round number"},
{"LatestTimestamp", "Last confirmed block UNIX timestamp. Fails if negative"},
{"CurrentApplicationID", "ID of current application executing. Fails if no such application is executing"},
}
// globalFieldDocs are notes on fields available in `global`
var globalFieldDocs map[string]string
// GlobalFieldDocs are notes on fields available in `global` with extra versioning info if any
func GlobalFieldDocs() map[string]string {
return fieldsDocWithExtra(globalFieldDocs, globalFieldSpecByName)
}
type extractor interface {
getExtraFor(string) string
}
func fieldsDocWithExtra(source map[string]string, ex extractor) map[string]string {
result := make(map[string]string, len(source))
for name, doc := range source {
if extra := ex.getExtraFor(name); len(extra) > 0 {
if len(doc) == 0 {
doc = extra
} else {
sep := ". "
if doc[len(doc)-1] == '.' {
sep = " "
}
doc = fmt.Sprintf("%s%s%s", doc, sep, extra)
}
}
result[name] = doc
}
return result
}
var assetHoldingFieldDocList = []stringString{
{"AssetBalance", "Amount of the asset unit held by this account"},
{"AssetFrozen", "Is the asset frozen or not"},
}
// AssetHoldingFieldDocs are notes on fields available in `asset_holding_get`
var AssetHoldingFieldDocs map[string]string
var assetParamsFieldDocList = []stringString{
{"AssetTotal", "Total number of units of this asset"},
{"AssetDecimals", "See AssetParams.Decimals"},
{"AssetDefaultFrozen", "Frozen by default or not"},
{"AssetUnitName", "Asset unit name"},
{"AssetName", "Asset name"},
{"AssetURL", "URL with additional info about the asset"},
{"AssetMetadataHash", "Arbitrary commitment"},
{"AssetManager", "Manager commitment"},
{"AssetReserve", "Reserve address"},
{"AssetFreeze", "Freeze address"},
{"AssetClawback", "Clawback address"},
}
// AssetParamsFieldDocs are notes on fields available in `asset_params_get`
var AssetParamsFieldDocs map[string]string
func init() {
txnFieldDocs = stringStringListToMap(txnFieldDocList)
globalFieldDocs = stringStringListToMap(globalFieldDocList)
AssetHoldingFieldDocs = stringStringListToMap(assetHoldingFieldDocList)
AssetParamsFieldDocs = stringStringListToMap(assetParamsFieldDocList)
}
| 1 | 41,691 | 'fail unless X is a non-zero number' it will also fail if X is a byte string | algorand-go-algorand | go |
@@ -1447,10 +1447,13 @@ class ElementPlot(BokehPlot, GenericElementPlot):
"""
Resets RangeXY streams if norm option is set to framewise
"""
- if self.overlaid:
+ # Temporarily reverts this fix (see https://github.com/holoviz/holoviews/issues/4396)
+ # This fix caused PlotSize change events to rerender
+ # rasterized/datashaded with the full extents which was wrong
+ if self.overlaid or True:
return
for el, callbacks in self.traverse(lambda x: (x.current_frame, x.callbacks)):
             if el is None:
continue
for callback in callbacks:
norm = self.lookup_options(el, 'norm').options | 1 | from __future__ import absolute_import, division, unicode_literals
import sys
import warnings
from types import FunctionType
import param
import numpy as np
import bokeh
import bokeh.plotting
from bokeh.core.properties import value
from bokeh.document.events import ModelChangedEvent
from bokeh.models import (
ColorBar, ColorMapper, Legend, Renderer, Title, tools
)
from bokeh.models.axes import CategoricalAxis, DatetimeAxis
from bokeh.models.formatters import (
FuncTickFormatter, TickFormatter, MercatorTickFormatter
)
from bokeh.models.mappers import (
LinearColorMapper, LogColorMapper, CategoricalColorMapper
)
from bokeh.models.ranges import Range1d, DataRange1d, FactorRange
from bokeh.models.tickers import (
Ticker, BasicTicker, FixedTicker, LogTicker, MercatorTicker
)
from bokeh.models.tools import Tool
from bokeh.models.widgets import Panel, Tabs
from ...core import DynamicMap, CompositeOverlay, Element, Dimension, Dataset
from ...core.options import abbreviated_exception, SkipRendering
from ...core import util
from ...element import Annotation, Graph, VectorField, Path, Contours, Tiles
from ...streams import Buffer, RangeXY, PlotSize
from ...util.transform import dim
from ..plot import GenericElementPlot, GenericOverlayPlot
from ..util import process_cmap, color_intervals, dim_range_key
from .callbacks import PlotSizeCallback
from .plot import BokehPlot
from .styles import (
base_properties, legend_dimensions, line_properties, mpl_to_bokeh,
property_prefixes, rgba_tuple, text_properties, validate
)
from .tabular import TablePlot
from .util import (
TOOL_TYPES, bokeh_version, date_to_integer, decode_bytes, get_tab_title,
glyph_order, py2js_tickformatter, recursive_model_update,
theme_attr_json, cds_column_replace, hold_policy, match_dim_specs,
compute_layout_properties, wrap_formatter, match_ax_type, remove_legend
)
try:
from bokeh.models import EqHistColorMapper
except ImportError:
EqHistColorMapper = None
try:
from bokeh.models import BinnedTicker
except ImportError:
BinnedTicker = None
if bokeh_version >= '2.0.1':
try:
TOOLS_MAP = Tool._known_aliases
except Exception:
TOOLS_MAP = TOOL_TYPES
elif bokeh_version >= '2.0.0':
from bokeh.plotting._tools import TOOLS_MAP
else:
from bokeh.plotting.helpers import _known_tools as TOOLS_MAP
class ElementPlot(BokehPlot, GenericElementPlot):
active_tools = param.List(default=[], doc="""
Allows specifying which tools are active by default. Note
that only one tool per gesture type can be active, e.g.
both 'pan' and 'box_zoom' are drag tools, so if both are
listed only the last one will be active.""")
align = param.ObjectSelector(default='start', objects=['start', 'center', 'end'], doc="""
Alignment (vertical or horizontal) of the plot in a layout.""")
border = param.Number(default=10, doc="""
Minimum border around plot.""")
aspect = param.Parameter(default=None, doc="""
The aspect ratio mode of the plot. By default, a plot may
select its own appropriate aspect ratio but sometimes it may
be necessary to force a square aspect ratio (e.g. to display
the plot as an element of a grid). The modes 'auto' and
'equal' correspond to the axis modes of the same name in
matplotlib, a numeric value specifying the ratio between plot
width and height may also be passed. To control the aspect
ratio between the axis scales use the data_aspect option
instead.""")
data_aspect = param.Number(default=None, doc="""
Defines the aspect of the axis scaling, i.e. the ratio of
y-unit to x-unit.""")
width = param.Integer(default=300, allow_None=True, bounds=(0, None), doc="""
The width of the component (in pixels). This can be either
fixed or preferred width, depending on width sizing policy.""")
height = param.Integer(default=300, allow_None=True, bounds=(0, None), doc="""
The height of the component (in pixels). This can be either
fixed or preferred height, depending on height sizing policy.""")
frame_width = param.Integer(default=None, allow_None=True, bounds=(0, None), doc="""
The width of the component (in pixels). This can be either
fixed or preferred width, depending on width sizing policy.""")
frame_height = param.Integer(default=None, allow_None=True, bounds=(0, None), doc="""
The height of the component (in pixels). This can be either
fixed or preferred height, depending on height sizing policy.""")
min_width = param.Integer(default=None, bounds=(0, None), doc="""
Minimal width of the component (in pixels) if width is adjustable.""")
min_height = param.Integer(default=None, bounds=(0, None), doc="""
Minimal height of the component (in pixels) if height is adjustable.""")
max_width = param.Integer(default=None, bounds=(0, None), doc="""
        Maximal width of the component (in pixels) if width is adjustable.""")
max_height = param.Integer(default=None, bounds=(0, None), doc="""
        Maximal height of the component (in pixels) if height is adjustable.""")
margin = param.Parameter(default=None, doc="""
Allows to create additional space around the component. May
be specified as a two-tuple of the form (vertical, horizontal)
or a four-tuple (top, right, bottom, left).""")
responsive = param.ObjectSelector(default=False, objects=[False, True, 'width', 'height'])
fontsize = param.Parameter(default={'title': '12pt'}, allow_None=True, doc="""
Specifies various fontsizes of the displayed text.
Finer control is available by supplying a dictionary where any
unmentioned keys reverts to the default sizes, e.g:
{'ticks': '20pt', 'title': '15pt', 'ylabel': '5px', 'xlabel': '5px'}""")
gridstyle = param.Dict(default={}, doc="""
Allows customizing the grid style, e.g. grid_line_color defines
the line color for both grids while xgrid_line_color exclusively
customizes the x-axis grid lines.""")
labelled = param.List(default=['x', 'y'], doc="""
Whether to plot the 'x' and 'y' labels.""")
lod = param.Dict(default={'factor': 10, 'interval': 300,
'threshold': 2000, 'timeout': 500}, doc="""
Bokeh plots offer "Level of Detail" (LOD) capability to
accommodate large (but not huge) amounts of data. The available
options are:
* factor : Decimation factor to use when applying
decimation.
* interval : Interval (in ms) downsampling will be enabled
after an interactive event.
* threshold : Number of samples before downsampling is enabled.
* timeout : Timeout (in ms) for checking whether interactive
tool events are still occurring.""")
show_frame = param.Boolean(default=True, doc="""
Whether or not to show a complete frame around the plot.""")
shared_axes = param.Boolean(default=True, doc="""
        Whether to share axes across plots
        for linked panning and zooming.""")
default_tools = param.List(default=['save', 'pan', 'wheel_zoom',
'box_zoom', 'reset'],
doc="A list of plugin tools to use on the plot.")
tools = param.List(default=[], doc="""
A list of plugin tools to use on the plot.""")
toolbar = param.ObjectSelector(default='right',
objects=["above", "below",
"left", "right", "disable", None],
doc="""
The toolbar location, must be one of 'above', 'below',
'left', 'right', None.""")
xformatter = param.ClassSelector(
default=None, class_=(util.basestring, TickFormatter, FunctionType), doc="""
Formatter for ticks along the x-axis.""")
yformatter = param.ClassSelector(
default=None, class_=(util.basestring, TickFormatter, FunctionType), doc="""
        Formatter for ticks along the y-axis.""")
_categorical = False
_allow_implicit_categories = True
# Declare which styles cannot be mapped to a non-scalar dimension
_nonvectorized_styles = []
# Declares the default types for continuous x- and y-axes
_x_range_type = Range1d
_y_range_type = Range1d
# Whether the plot supports streaming data
_stream_data = True
def __init__(self, element, plot=None, **params):
self.current_ranges = None
super(ElementPlot, self).__init__(element, **params)
self.handles = {} if plot is None else self.handles['plot']
self.static = len(self.hmap) == 1 and len(self.keys) == len(self.hmap)
self.callbacks, self.source_streams = self._construct_callbacks()
self.static_source = False
self.streaming = [s for s in self.streams if isinstance(s, Buffer)]
self.geographic = bool(self.hmap.last.traverse(lambda x: x, Tiles))
if self.geographic and self.projection is None:
self.projection = 'mercator'
# Whether axes are shared between plots
self._shared = {'x': False, 'y': False}
# Flag to check whether plot has been updated
self._updated = False
def _hover_opts(self, element):
if self.batched:
dims = list(self.hmap.last.kdims)
else:
dims = list(self.overlay_dims.keys())
dims += element.dimensions()
return list(util.unique_iterator(dims)), {}
def _init_tools(self, element, callbacks=[]):
"""
Processes the list of tools to be supplied to the plot.
"""
tooltips, hover_opts = self._hover_opts(element)
tooltips = [(ttp.pprint_label, '@{%s}' % util.dimension_sanitizer(ttp.name))
if isinstance(ttp, Dimension) else ttp for ttp in tooltips]
if not tooltips: tooltips = None
callbacks = callbacks+self.callbacks
cb_tools, tool_names = [], []
hover = False
for cb in callbacks:
for handle in cb.models+cb.extra_models:
if handle and handle in TOOLS_MAP:
tool_names.append(handle)
if handle == 'hover':
tool = tools.HoverTool(
tooltips=tooltips, tags=['hv_created'],
**hover_opts)
hover = tool
else:
tool = TOOLS_MAP[handle]()
cb_tools.append(tool)
self.handles[handle] = tool
tool_list = [
t for t in cb_tools + self.default_tools + self.tools
if t not in tool_names]
tool_list = [
tools.HoverTool(tooltips=tooltips, tags=['hv_created'], mode=tl, **hover_opts)
if tl in ['vline', 'hline'] else tl for tl in tool_list
]
copied_tools = []
for tool in tool_list:
if isinstance(tool, tools.Tool):
properties = tool.properties_with_values(include_defaults=False)
tool = type(tool)(**properties)
copied_tools.append(tool)
hover_tools = [t for t in copied_tools if isinstance(t, tools.HoverTool)]
if 'hover' in copied_tools:
hover = tools.HoverTool(tooltips=tooltips, tags=['hv_created'], **hover_opts)
copied_tools[copied_tools.index('hover')] = hover
elif any(hover_tools):
hover = hover_tools[0]
if hover:
self.handles['hover'] = hover
box_tools = [t for t in copied_tools if isinstance(t, tools.BoxSelectTool)]
if box_tools:
self.handles['box_select'] = box_tools[0]
lasso_tools = [t for t in copied_tools if isinstance(t, tools.LassoSelectTool)]
if lasso_tools:
self.handles['lasso_select'] = lasso_tools[0]
# Link the selection properties between tools
if box_tools and lasso_tools:
box_tools[0].js_link('mode', lasso_tools[0], 'mode')
lasso_tools[0].js_link('mode', box_tools[0], 'mode')
return copied_tools
def _update_hover(self, element):
tool = self.handles['hover']
if 'hv_created' in tool.tags:
tooltips, hover_opts = self._hover_opts(element)
tooltips = [(ttp.pprint_label, '@{%s}' % util.dimension_sanitizer(ttp.name))
if isinstance(ttp, Dimension) else ttp for ttp in tooltips]
tool.tooltips = tooltips
else:
plot_opts = element.opts.get('plot', 'bokeh')
new_hover = [t for t in plot_opts.kwargs.get('tools', [])
if isinstance(t, tools.HoverTool)]
if new_hover:
tool.tooltips = new_hover[0].tooltips
def _get_hover_data(self, data, element, dimensions=None):
"""
Initializes hover data based on Element dimension values.
If empty initializes with no data.
"""
if 'hover' not in self.handles or self.static_source:
return
for d in (dimensions or element.dimensions()):
dim = util.dimension_sanitizer(d.name)
if dim not in data:
data[dim] = element.dimension_values(d)
for k, v in self.overlay_dims.items():
dim = util.dimension_sanitizer(k.name)
if dim not in data:
data[dim] = [v for _ in range(len(list(data.values())[0]))]
def _merge_ranges(self, plots, xspecs, yspecs, xtype, ytype):
"""
Given a list of other plots return axes that are shared
with another plot by matching the dimensions specs stored
as tags on the dimensions.
"""
plot_ranges = {}
for plot in plots:
if plot is None:
continue
if hasattr(plot, 'x_range') and plot.x_range.tags and xspecs is not None:
if match_dim_specs(plot.x_range.tags[0], xspecs) and match_ax_type(plot.xaxis, xtype):
plot_ranges['x_range'] = plot.x_range
if match_dim_specs(plot.x_range.tags[0], yspecs) and match_ax_type(plot.xaxis, ytype):
plot_ranges['y_range'] = plot.x_range
if hasattr(plot, 'y_range') and plot.y_range.tags and yspecs is not None:
if match_dim_specs(plot.y_range.tags[0], yspecs) and match_ax_type(plot.yaxis, ytype):
plot_ranges['y_range'] = plot.y_range
if match_dim_specs(plot.y_range.tags[0], xspecs) and match_ax_type(plot.yaxis, xtype):
plot_ranges['x_range'] = plot.y_range
return plot_ranges
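    # Illustrative note (hypothetical values): _axes_props tags each range with
    # a spec tuple such as (('time', 'Time', 's'),) built from dimension name,
    # label and unit; two plots whose x-ranges carry matching spec tuples (and
    # compatible axis types) end up reusing the same Range1d here, which is
    # what links their panning and zooming.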
def _get_axis_dims(self, element):
"""Returns the dimensions corresponding to each axis.
Should return a list of dimensions or list of lists of
dimensions, which will be formatted to label the axis
and to link axes.
"""
dims = element.dimensions()[:2]
if len(dims) == 1:
return dims + [None, None]
else:
return dims + [None]
def _axes_props(self, plots, subplots, element, ranges):
# Get the bottom layer and range element
el = element.traverse(lambda x: x, [lambda el: isinstance(el, Element) and not isinstance(el, (Annotation, Tiles))])
el = el[0] if el else element
dims = self._get_axis_dims(el)
xlabel, ylabel, zlabel = self._get_axis_labels(dims)
if self.invert_axes:
xlabel, ylabel = ylabel, xlabel
dims = dims[:2][::-1]
xdims, ydims = dims[:2]
if xdims:
if not isinstance(xdims, list):
xdims = [xdims]
xspecs = tuple((xd.name, xd.label, xd.unit) for xd in xdims)
else:
xspecs = None
if ydims:
if not isinstance(ydims, list):
ydims = [ydims]
yspecs = tuple((yd.name, yd.label, yd.unit) for yd in ydims)
else:
yspecs = None
# Get the Element that determines the range and get_extents
range_el = el if self.batched and not isinstance(self, OverlayPlot) else element
l, b, r, t = self.get_extents(range_el, ranges)
if self.invert_axes:
l, b, r, t = b, l, t, r
categorical = any(self.traverse(lambda x: x._categorical))
if xdims is not None and any(xdim.name in ranges and 'factors' in ranges[xdim.name] for xdim in xdims):
categorical_x = True
else:
categorical_x = any(isinstance(x, (util.basestring, bytes)) for x in (l, r))
if ydims is not None and any(ydim.name in ranges and 'factors' in ranges[ydim.name] for ydim in ydims):
categorical_y = True
else:
categorical_y = any(isinstance(y, (util.basestring, bytes)) for y in (b, t))
range_types = (self._x_range_type, self._y_range_type)
if self.invert_axes: range_types = range_types[::-1]
x_range_type, y_range_type = range_types
x_axis_type = 'log' if self.logx else 'auto'
if xdims:
if len(xdims) > 1 or x_range_type is FactorRange:
x_axis_type = 'auto'
categorical_x = True
else:
if isinstance(el, Graph):
xtype = el.nodes.get_dimension_type(xdims[0])
else:
xtype = el.get_dimension_type(xdims[0])
if ((xtype is np.object_ and issubclass(type(l), util.datetime_types)) or
xtype in util.datetime_types):
x_axis_type = 'datetime'
y_axis_type = 'log' if self.logy else 'auto'
if ydims:
if len(ydims) > 1 or y_range_type is FactorRange:
y_axis_type = 'auto'
categorical_y = True
else:
if isinstance(el, Graph):
ytype = el.nodes.get_dimension_type(ydims[0])
else:
ytype = el.get_dimension_type(ydims[0])
if ((ytype is np.object_ and issubclass(type(b), util.datetime_types))
or ytype in util.datetime_types):
y_axis_type = 'datetime'
plot_ranges = {}
# Try finding shared ranges in other plots in the same Layout
norm_opts = self.lookup_options(el, 'norm').options
if plots and self.shared_axes and not norm_opts.get('axiswise', False):
plot_ranges = self._merge_ranges(plots, xspecs, yspecs, x_axis_type, y_axis_type)
# Declare shared axes
x_range, y_range = plot_ranges.get('x_range'), plot_ranges.get('y_range')
if x_range and not (x_range_type is FactorRange and not isinstance(x_range, FactorRange)):
self._shared['x'] = True
if y_range and not (y_range_type is FactorRange and not isinstance(y_range, FactorRange)):
self._shared['y'] = True
if self._shared['x']:
pass
elif categorical or categorical_x:
x_axis_type = 'auto'
plot_ranges['x_range'] = FactorRange()
else:
plot_ranges['x_range'] = x_range_type()
if self._shared['y']:
pass
elif categorical or categorical_y:
y_axis_type = 'auto'
plot_ranges['y_range'] = FactorRange()
elif 'y_range' not in plot_ranges:
plot_ranges['y_range'] = y_range_type()
x_range, y_range = plot_ranges['x_range'], plot_ranges['y_range']
if not x_range.tags and xspecs is not None:
x_range.tags.append(xspecs)
if not y_range.tags and yspecs is not None:
y_range.tags.append(yspecs)
return (x_axis_type, y_axis_type), (xlabel, ylabel, zlabel), plot_ranges
def _init_plot(self, key, element, plots, ranges=None):
"""
Initializes Bokeh figure to draw Element into and sets basic
figure and axis attributes including axes types, labels,
titles and plot height and width.
"""
subplots = list(self.subplots.values()) if self.subplots else []
axis_types, labels, plot_ranges = self._axes_props(plots, subplots, element, ranges)
xlabel, ylabel, _ = labels
x_axis_type, y_axis_type = axis_types
properties = dict(plot_ranges)
properties['x_axis_label'] = xlabel if 'x' in self.labelled or self.xlabel else ' '
properties['y_axis_label'] = ylabel if 'y' in self.labelled or self.ylabel else ' '
if not self.show_frame:
properties['outline_line_alpha'] = 0
if self.show_title and self.adjoined is None:
title = self._format_title(key, separator=' ')
else:
title = ''
if self.toolbar != 'disable':
tools = self._init_tools(element)
properties['tools'] = tools
properties['toolbar_location'] = self.toolbar
else:
properties['tools'] = []
properties['toolbar_location'] = None
if self.renderer.webgl:
properties['output_backend'] = 'webgl'
properties.update(**self._plot_properties(key, element))
with warnings.catch_warnings():
# Bokeh raises warnings about duplicate tools but these
# are not really an issue
warnings.simplefilter('ignore', UserWarning)
return bokeh.plotting.Figure(x_axis_type=x_axis_type,
y_axis_type=y_axis_type, title=title,
**properties)
def _plot_properties(self, key, element):
"""
Returns a dictionary of plot properties.
"""
init = 'plot' not in self.handles
size_multiplier = self.renderer.size/100.
options = self._traverse_options(element, 'plot', ['width', 'height'], defaults=False)
logger = self.param if init else None
aspect_props, dimension_props = compute_layout_properties(
self.width, self.height, self.frame_width, self.frame_height,
options.get('width'), options.get('height'), self.aspect, self.data_aspect,
self.responsive, size_multiplier, logger=logger)
if not init:
if aspect_props['aspect_ratio'] is None:
aspect_props['aspect_ratio'] = self.state.aspect_ratio
if self.dynamic and aspect_props['match_aspect']:
# Sync the plot size on dynamic plots to support accurate
# scaling of dimension ranges
plot_size = [s for s in self.streams if isinstance(s, PlotSize)]
callbacks = [c for c in self.callbacks if isinstance(c, PlotSizeCallback)]
if plot_size:
stream = plot_size[0]
elif callbacks:
stream = callbacks[0].streams[0]
else:
stream = PlotSize()
self.callbacks.append(PlotSizeCallback(self, [stream], None))
stream.add_subscriber(self._update_size)
plot_props = {
'align': self.align,
'margin': self.margin,
'max_width': self.max_width,
'max_height': self.max_height,
'min_width': self.min_width,
'min_height': self.min_height
}
plot_props.update(aspect_props)
if not self.drawn:
plot_props.update(dimension_props)
if self.bgcolor:
plot_props['background_fill_color'] = self.bgcolor
if self.border is not None:
for p in ['left', 'right', 'top', 'bottom']:
plot_props['min_border_'+p] = self.border
lod = dict(self.param.defaults().get('lod', {}), **self.lod)
for lod_prop, v in lod.items():
plot_props['lod_'+lod_prop] = v
return plot_props
def _update_size(self, width, height, scale):
self.state.frame_width = width
self.state.frame_height = height
def _set_active_tools(self, plot):
"Activates the list of active tools"
for tool in self.active_tools:
if isinstance(tool, util.basestring):
tool_type = TOOL_TYPES[tool]
matching = [t for t in plot.toolbar.tools
if isinstance(t, tool_type)]
if not matching:
self.param.warning('Tool of type %r could not be found '
'and could not be activated by default.'
% tool)
continue
tool = matching[0]
if isinstance(tool, tools.Drag):
plot.toolbar.active_drag = tool
if isinstance(tool, tools.Scroll):
plot.toolbar.active_scroll = tool
if isinstance(tool, tools.Tap):
plot.toolbar.active_tap = tool
if isinstance(tool, tools.Inspection):
plot.toolbar.active_inspect.append(tool)
def _title_properties(self, key, plot, element):
if self.show_title and self.adjoined is None:
title = self._format_title(key, separator=' ')
else:
title = ''
opts = dict(text=title)
# this will override theme if not set to the default 12pt
title_font = self._fontsize('title').get('fontsize')
if title_font != '12pt':
opts['text_font_size'] = value(title_font)
return opts
def _init_axes(self, plot):
if self.xaxis is None:
plot.xaxis.visible = False
elif isinstance(self.xaxis, util.basestring) and 'top' in self.xaxis:
plot.above = plot.below
plot.below = []
plot.xaxis[:] = plot.above
self.handles['xaxis'] = plot.xaxis[0]
self.handles['x_range'] = plot.x_range
if self.yaxis is None:
plot.yaxis.visible = False
        elif isinstance(self.yaxis, util.basestring) and 'right' in self.yaxis:
plot.right = plot.left
plot.left = []
plot.yaxis[:] = plot.right
self.handles['yaxis'] = plot.yaxis[0]
self.handles['y_range'] = plot.y_range
def _axis_properties(self, axis, key, plot, dimension=None,
ax_mapping={'x': 0, 'y': 1}):
"""
Returns a dictionary of axis properties depending
on the specified axis.
"""
# need to copy dictionary by calling dict() on it
axis_props = dict(theme_attr_json(self.renderer.theme, 'Axis'))
if ((axis == 'x' and self.xaxis in ['bottom-bare', 'top-bare', 'bare']) or
(axis == 'y' and self.yaxis in ['left-bare', 'right-bare', 'bare'])):
axis_props['axis_label_text_font_size'] = value('0pt')
axis_props['major_label_text_font_size'] = value('0pt')
axis_props['major_tick_line_color'] = None
axis_props['minor_tick_line_color'] = None
else:
labelsize = self._fontsize('%slabel' % axis).get('fontsize')
if labelsize:
axis_props['axis_label_text_font_size'] = labelsize
ticksize = self._fontsize('%sticks' % axis, common=False).get('fontsize')
if ticksize:
axis_props['major_label_text_font_size'] = value(ticksize)
rotation = self.xrotation if axis == 'x' else self.yrotation
if rotation:
axis_props['major_label_orientation'] = np.radians(rotation)
ticker = self.xticks if axis == 'x' else self.yticks
if isinstance(ticker, Ticker):
axis_props['ticker'] = ticker
elif isinstance(ticker, int):
axis_props['ticker'] = BasicTicker(desired_num_ticks=ticker)
elif isinstance(ticker, (tuple, list)):
if all(isinstance(t, tuple) for t in ticker):
ticks, labels = zip(*ticker)
# Ensure floats which are integers are serialized as ints
# because in JS the lookup fails otherwise
ticks = [int(t) if isinstance(t, float) and t.is_integer() else t
for t in ticks]
labels = [l if isinstance(l, util.basestring) else str(l)
for l in labels]
axis_props['ticker'] = FixedTicker(ticks=ticks)
axis_props['major_label_overrides'] = dict(zip(ticks, labels))
else:
axis_props['ticker'] = FixedTicker(ticks=ticker)
formatter = self.xformatter if axis == 'x' else self.yformatter
if formatter:
formatter = wrap_formatter(formatter, axis)
if formatter is not None:
axis_props['formatter'] = formatter
elif FuncTickFormatter is not None and ax_mapping and isinstance(dimension, Dimension):
formatter = None
if dimension.value_format:
formatter = dimension.value_format
elif dimension.type in dimension.type_formatters:
formatter = dimension.type_formatters[dimension.type]
if formatter:
msg = ('%s dimension formatter could not be '
'converted to tick formatter. ' % dimension.name)
jsfunc = py2js_tickformatter(formatter, msg)
if jsfunc:
formatter = FuncTickFormatter(code=jsfunc)
axis_props['formatter'] = formatter
if axis == 'x':
axis_obj = plot.xaxis[0]
elif axis == 'y':
axis_obj = plot.yaxis[0]
if self.geographic and self.projection == 'mercator':
dimension = 'lon' if axis == 'x' else 'lat'
axis_props['ticker'] = MercatorTicker(dimension=dimension)
axis_props['formatter'] = MercatorTickFormatter(dimension=dimension)
box_zoom = self.state.select(type=tools.BoxZoomTool)
if box_zoom:
box_zoom[0].match_aspect = True
elif isinstance(axis_obj, CategoricalAxis):
for key in list(axis_props):
if key.startswith('major_label'):
# set the group labels equal to major (actually minor)
new_key = key.replace('major_label', 'group')
axis_props[new_key] = axis_props[key]
# major ticks are actually minor ticks in a categorical
# so if user inputs minor ticks sizes, then use that;
# else keep major (group) == minor (subgroup)
msize = self._fontsize('minor_{0}ticks'.format(axis),
common=False).get('fontsize')
if msize is not None:
axis_props['major_label_text_font_size'] = msize
return axis_props
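    # Illustrative note (example values are hypothetical): in the branch above,
    # a ticks spec of (value, label) pairs such as xticks=[(0, 'low'), (50, 'mid'),
    # (100, 'high')] becomes a FixedTicker over [0, 50, 100] plus a
    # major_label_overrides mapping {0: 'low', 50: 'mid', 100: 'high'}, while a
    # plain list of numbers becomes a FixedTicker only and an int becomes a
    # BasicTicker with that many desired ticks.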
def _update_plot(self, key, plot, element=None):
"""
Updates plot parameters on every frame
"""
plot.update(**self._plot_properties(key, element))
self._update_labels(key, plot, element)
self._update_title(key, plot, element)
self._update_grid(plot)
def _update_labels(self, key, plot, element):
el = element.traverse(lambda x: x, [Element])
el = el[0] if el else element
dimensions = self._get_axis_dims(el)
props = {axis: self._axis_properties(axis, key, plot, dim)
for axis, dim in zip(['x', 'y'], dimensions)}
xlabel, ylabel, zlabel = self._get_axis_labels(dimensions)
if self.invert_axes:
xlabel, ylabel = ylabel, xlabel
props['x']['axis_label'] = xlabel if 'x' in self.labelled or self.xlabel else ''
props['y']['axis_label'] = ylabel if 'y' in self.labelled or self.ylabel else ''
recursive_model_update(plot.xaxis[0], props.get('x', {}))
recursive_model_update(plot.yaxis[0], props.get('y', {}))
def _update_title(self, key, plot, element):
if plot.title:
plot.title.update(**self._title_properties(key, plot, element))
else:
plot.title = Title(**self._title_properties(key, plot, element))
def _update_grid(self, plot):
if not self.show_grid:
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None
return
        replace = ['bounds', 'bands', 'visible', 'level', 'ticker']
style_items = list(self.gridstyle.items())
both = {k: v for k, v in style_items if k.startswith('grid_') or k.startswith('minor_grid')}
xgrid = {k.replace('xgrid', 'grid'): v for k, v in style_items if 'xgrid' in k}
ygrid = {k.replace('ygrid', 'grid'): v for k, v in style_items if 'ygrid' in k}
xopts = {k.replace('grid_', '') if any(r in k for r in replace) else k: v
for k, v in dict(both, **xgrid).items()}
yopts = {k.replace('grid_', '') if any(r in k for r in replace) else k: v
for k, v in dict(both, **ygrid).items()}
if plot.xaxis and 'ticker' not in xopts:
xopts['ticker'] = plot.xaxis[0].ticker
if plot.yaxis and 'ticker' not in yopts:
yopts['ticker'] = plot.yaxis[0].ticker
plot.xgrid[0].update(**xopts)
plot.ygrid[0].update(**yopts)
def _update_ranges(self, element, ranges):
plot = self.handles['plot']
x_range = self.handles['x_range']
y_range = self.handles['y_range']
l, b, r, t = None, None, None, None
if any(isinstance(r, (Range1d, DataRange1d)) for r in [x_range, y_range]):
l, b, r, t = self.get_extents(element, ranges)
if self.invert_axes:
l, b, r, t = b, l, t, r
xfactors, yfactors = None, None
if any(isinstance(ax_range, FactorRange) for ax_range in [x_range, y_range]):
xfactors, yfactors = self._get_factors(element, ranges)
framewise = self.framewise
streaming = (self.streaming and any(stream._triggering and stream.following
for stream in self.streaming))
xupdate = ((not (self.model_changed(x_range) or self.model_changed(plot))
and (framewise or streaming))
or xfactors is not None)
yupdate = ((not (self.model_changed(x_range) or self.model_changed(plot))
and (framewise or streaming))
or yfactors is not None)
options = self._traverse_options(element, 'plot', ['width', 'height'], defaults=False)
fixed_width = (self.frame_width or options.get('width'))
fixed_height = (self.frame_height or options.get('height'))
constrained_width = options.get('min_width') or options.get('max_width')
constrained_height = options.get('min_height') or options.get('max_height')
data_aspect = (self.aspect == 'equal' or self.data_aspect)
xaxis, yaxis = self.handles['xaxis'], self.handles['yaxis']
categorical = isinstance(xaxis, CategoricalAxis) or isinstance(yaxis, CategoricalAxis)
datetime = isinstance(xaxis, DatetimeAxis) or isinstance(yaxis, DatetimeAxis)
if data_aspect and (categorical or datetime):
ax_type = 'categorical' if categorical else 'datetime axes'
self.param.warning('Cannot set data_aspect if one or both '
'axes are %s, the option will '
'be ignored.' % ax_type)
elif data_aspect:
plot = self.handles['plot']
xspan = r-l if util.is_number(l) and util.is_number(r) else None
yspan = t-b if util.is_number(b) and util.is_number(t) else None
if self.drawn or (fixed_width and fixed_height) or (constrained_width or constrained_height):
# After initial draw or if aspect is explicit
# adjust range to match the plot dimension aspect
ratio = self.data_aspect or 1
if self.aspect == 'square':
frame_aspect = 1
elif self.aspect and self.aspect != 'equal':
frame_aspect = self.aspect
else:
frame_aspect = plot.frame_height/plot.frame_width
range_streams = [s for s in self.streams if isinstance(s, RangeXY)]
if self.drawn:
current_l, current_r = plot.x_range.start, plot.x_range.end
current_b, current_t = plot.y_range.start, plot.y_range.end
current_xspan, current_yspan = (current_r-current_l), (current_t-current_b)
else:
current_l, current_r, current_b, current_t = l, r, b, t
current_xspan, current_yspan = xspan, yspan
if any(rs._triggering for rs in range_streams):
# If the event was triggered by a RangeXY stream
# event we want to get the latest range span
# values so we do not accidentally trigger a
# loop of events
l, r, b, t = current_l, current_r, current_b, current_t
xspan, yspan = current_xspan, current_yspan
size_streams = [s for s in self.streams if isinstance(s, PlotSize)]
if any(ss._triggering for ss in size_streams) and self._updated:
# Do not trigger on frame size changes, except for
# the initial one which can be important if width
# and/or height constraints have forced different
# aspect. After initial event we skip because size
# changes can trigger event loops if the tick
# labels change the canvas size
return
desired_xspan = yspan*(ratio/frame_aspect)
desired_yspan = xspan/(ratio/frame_aspect)
if ((np.allclose(desired_xspan, xspan, rtol=0.05) and
np.allclose(desired_yspan, yspan, rtol=0.05)) or
not (util.isfinite(xspan) and util.isfinite(yspan))):
pass
elif desired_yspan >= yspan:
desired_yspan = current_xspan/(ratio/frame_aspect)
ypad = (desired_yspan-yspan)/2.
b, t = b-ypad, t+ypad
yupdate = True
else:
desired_xspan = current_yspan*(ratio/frame_aspect)
xpad = (desired_xspan-xspan)/2.
l, r = l-xpad, r+xpad
xupdate = True
elif not (fixed_height and fixed_width):
# Set initial aspect
aspect = self.get_aspect(xspan, yspan)
width = plot.frame_width or plot.plot_width or 300
height = plot.frame_height or plot.plot_height or 300
if not (fixed_width or fixed_height) and not self.responsive:
fixed_height = True
if fixed_height:
plot.frame_height = height
plot.frame_width = int(height/aspect)
plot.plot_width, plot.plot_height = None, None
elif fixed_width:
plot.frame_width = width
plot.frame_height = int(width*aspect)
plot.plot_width, plot.plot_height = None, None
else:
plot.aspect_ratio = 1./aspect
box_zoom = plot.select(type=tools.BoxZoomTool)
scroll_zoom = plot.select(type=tools.WheelZoomTool)
if box_zoom:
box_zoom.match_aspect = True
if scroll_zoom:
scroll_zoom.zoom_on_axis = False
if not self.drawn or xupdate:
self._update_range(x_range, l, r, xfactors, self.invert_xaxis,
self._shared['x'], self.logx, streaming)
if not self.drawn or yupdate:
self._update_range(y_range, b, t, yfactors, self.invert_yaxis,
self._shared['y'], self.logy, streaming)
def _update_range(self, axis_range, low, high, factors, invert, shared, log, streaming=False):
if isinstance(axis_range, (Range1d, DataRange1d)) and self.apply_ranges:
if isinstance(low, util.cftime_types):
pass
elif (low == high and low is not None):
if isinstance(low, util.datetime_types):
offset = np.timedelta64(500, 'ms')
low, high = np.datetime64(low), np.datetime64(high)
low -= offset
high += offset
else:
offset = abs(low*0.1 if low else 0.5)
low -= offset
high += offset
if shared:
shared = (axis_range.start, axis_range.end)
low, high = util.max_range([(low, high), shared])
if invert: low, high = high, low
if not isinstance(low, util.datetime_types) and log and (low is None or low <= 0):
low = 0.01 if high < 0.01 else 10**(np.log10(high)-2)
self.param.warning(
"Logarithmic axis range encountered value less "
"than or equal to zero, please supply explicit "
"lower-bound to override default of %.3f." % low)
updates = {}
if util.isfinite(low):
updates['start'] = (axis_range.start, low)
updates['reset_start'] = updates['start']
if util.isfinite(high):
updates['end'] = (axis_range.end, high)
updates['reset_end'] = updates['end']
for k, (old, new) in updates.items():
if isinstance(new, util.cftime_types):
new = date_to_integer(new)
axis_range.update(**{k:new})
if streaming and not k.startswith('reset_'):
axis_range.trigger(k, old, new)
elif isinstance(axis_range, FactorRange):
factors = list(decode_bytes(factors))
if invert: factors = factors[::-1]
axis_range.factors = factors
def _categorize_data(self, data, cols, dims):
"""
Transforms non-string or integer types in datasource if the
axis to be plotted on is categorical. Accepts the column data
source data, the columns corresponding to the axes and the
dimensions for each axis, changing the data inplace.
"""
if self.invert_axes:
cols = cols[::-1]
dims = dims[:2][::-1]
ranges = [self.handles['%s_range' % ax] for ax in 'xy']
for i, col in enumerate(cols):
column = data[col]
if (isinstance(ranges[i], FactorRange) and
(isinstance(column, list) or column.dtype.kind not in 'SU')):
data[col] = [dims[i].pprint_value(v) for v in column]
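# Illustrative note (added comment, not in the original source): if the x-axis
# uses a FactorRange and data['x'] holds numeric values such as [1, 2, 3], the
# loop above replaces them with their printed forms, e.g. ['1', '2', '3'], so
# they can be matched against the categorical factors.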
def get_aspect(self, xspan, yspan):
"""
Computes the aspect ratio of the plot
"""
if 'plot' in self.handles and self.state.frame_width and self.state.frame_height:
return self.state.frame_width/self.state.frame_height
elif self.data_aspect:
return (yspan/xspan)*self.data_aspect
elif self.aspect == 'equal':
return yspan/xspan
elif self.aspect == 'square':
return 1
elif self.aspect is not None:
return self.aspect
elif self.width is not None and self.height is not None:
return self.width/self.height
else:
return 1
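# Worked example (added comment, assuming explicit frame dimensions): with
# state.frame_width=400 and state.frame_height=200 the first branch above
# returns 400/200 = 2.0, i.e. the aspect is width divided by height.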
def _get_factors(self, element, ranges):
"""
Get factors for categorical axes.
"""
xdim, ydim = element.dimensions()[:2]
if xdim.values:
xvals = xdim.values
elif 'factors' in ranges.get(xdim.name, {}):
xvals = ranges[xdim.name]['factors']
else:
xvals = element.dimension_values(0, False)
if ydim.values:
yvals = ydim.values
elif 'factors' in ranges.get(ydim.name, {}):
yvals = ranges[ydim.name]['factors']
else:
yvals = element.dimension_values(1, False)
xvals, yvals = np.asarray(xvals), np.asarray(yvals)
if not self._allow_implicit_categories:
xvals = xvals if xvals.dtype.kind in 'SU' else []
yvals = yvals if yvals.dtype.kind in 'SU' else []
coords = tuple([v if vals.dtype.kind in 'SU' else dim.pprint_value(v) for v in vals]
for dim, vals in [(xdim, xvals), (ydim, yvals)])
if self.invert_axes: coords = coords[::-1]
return coords
def _process_legend(self):
"""
Disables legends if show_legend is disabled.
"""
for l in self.handles['plot'].legend:
l.items[:] = []
l.border_line_alpha = 0
l.background_fill_alpha = 0
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
properties = mpl_to_bokeh(properties)
plot_method = self._plot_methods.get('batched' if self.batched else 'single')
if isinstance(plot_method, tuple):
# Handle alternative plot method for flipped axes
plot_method = plot_method[int(self.invert_axes)]
renderer = getattr(plot, plot_method)(**dict(properties, **mapping))
return renderer, renderer.glyph
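# Descriptive comment (added, not part of the original source): the method
# below resolves style values given as dimension names or dim(...) expressions
# into concrete per-point arrays, normalizes font sizes and angles, attaches a
# colormapper when a color style maps to data, and finally expands color/alpha
# values into the corresponding fill_*/line_* Bokeh properties.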
def _apply_transforms(self, element, data, ranges, style, group=None):
new_style = dict(style)
prefix = group+'_' if group else ''
for k, v in dict(style).items():
if isinstance(v, util.basestring):
if validate(k, v) == True:
continue
elif v in element or (isinstance(element, Graph) and v in element.nodes):
v = dim(v)
elif any(d==v for d in self.overlay_dims):
v = dim([d for d in self.overlay_dims if d==v][0])
if (not isinstance(v, dim) or (group is not None and not k.startswith(group))):
continue
elif (not v.applies(element) and v.dimension not in self.overlay_dims):
new_style.pop(k)
self.param.warning(
'Specified %s dim transform %r could not be applied, '
'as not all dimensions could be resolved.' % (k, v))
continue
if v.dimension in self.overlay_dims:
ds = Dataset({d.name: v for d, v in self.overlay_dims.items()},
list(self.overlay_dims))
val = v.apply(ds, ranges=ranges, flat=True)[0]
elif isinstance(element, Path) and not isinstance(element, Contours):
val = np.concatenate([v.apply(el, ranges=ranges, flat=True)
for el in element.split()])
else:
val = v.apply(element, ranges=ranges, flat=True)
if (not util.isscalar(val) and len(util.unique_array(val)) == 1 and
((not 'color' in k or validate('color', val)) or k in self._nonvectorized_styles)):
val = val[0]
if not util.isscalar(val):
if k in self._nonvectorized_styles:
element = type(element).__name__
raise ValueError('Mapping a dimension to the "{style}" '
'style option is not supported by the '
'{element} element using the {backend} '
'backend. To map the "{dim}" dimension '
'to the {style} use a groupby operation '
'to overlay your data along the dimension.'.format(
style=k, dim=v.dimension, element=element,
backend=self.renderer.backend))
elif data and len(val) != len(list(data.values())[0]):
if isinstance(element, VectorField):
val = np.tile(val, 3)
elif isinstance(element, Path) and not isinstance(element, Contours):
val = val[:-1]
else:
continue
if k == 'angle':
val = np.deg2rad(val)
elif k.endswith('font_size'):
if util.isscalar(val) and isinstance(val, int):
val = str(val)+'pt'
elif isinstance(val, np.ndarray) and val.dtype.kind in 'ifu':
val = [str(int(s))+'pt' for s in val]
if util.isscalar(val):
key = val
else:
# Node marker does not handle {'field': ...}
key = k if k == 'node_marker' else {'field': k}
data[k] = val
# If color is not valid colorspec add colormapper
numeric = isinstance(val, util.arraylike_types) and val.dtype.kind in 'uifMmb'
colormap = style.get(prefix+'cmap')
if ('color' in k and isinstance(val, util.arraylike_types) and
(numeric or not validate('color', val) or isinstance(colormap, dict))):
kwargs = {}
if val.dtype.kind not in 'ifMu':
range_key = dim_range_key(v)
if range_key in ranges and 'factors' in ranges[range_key]:
factors = ranges[range_key]['factors']
else:
factors = util.unique_array(val)
if isinstance(val, util.arraylike_types) and val.dtype.kind == 'b':
factors = factors.astype(str)
kwargs['factors'] = factors
cmapper = self._get_colormapper(v, element, ranges,
dict(style), name=k+'_color_mapper',
group=group, **kwargs)
categorical = isinstance(cmapper, CategoricalColorMapper)
if categorical and val.dtype.kind in 'ifMub':
if v.dimension in element:
formatter = element.get_dimension(v.dimension).pprint_value
else:
formatter = str
field = k + '_str__'
data[k+'_str__'] = [formatter(d) for d in val]
else:
field = k
if categorical and getattr(self, 'show_legend', False):
legend_prop = 'legend_field' if bokeh_version >= '1.3.5' else 'legend'
new_style[legend_prop] = field
key = {'field': field, 'transform': cmapper}
new_style[k] = key
# Process color/alpha styles and expand to fill/line style
for style, val in list(new_style.items()):
for s in ('alpha', 'color'):
if prefix+s != style or style not in data or validate(s, val, True):
continue
supports_fill = any(
o.startswith(prefix+'fill') and (prefix != 'edge_' or getattr(self, 'filled', True))
for o in self.style_opts)
for pprefix in [p+'_' for p in property_prefixes]+['']:
fill_key = prefix+pprefix+'fill_'+s
fill_style = new_style.get(fill_key)
# Do not override custom nonselection/muted alpha
if ((pprefix in ('nonselection_', 'muted_') and s == 'alpha')
or fill_key not in self.style_opts):
continue
# Override empty and non-vectorized fill_style if not hover style
hover = pprefix == 'hover_'
if ((fill_style is None or (validate(s, fill_style, True) and not hover))
and supports_fill):
new_style[fill_key] = val
line_key = prefix+pprefix+'line_'+s
line_style = new_style.get(line_key)
# If the glyph has a fill and a line style is already set, do not override the line color
if supports_fill and line_style is not None:
continue
# If glyph does not support fill override non-vectorized line_color
if ((line_style is not None and (validate(s, line_style) and not hover)) or
(line_style is None and not supports_fill)):
new_style[line_key] = val
return new_style
def _glyph_properties(self, plot, element, source, ranges, style, group=None):
properties = dict(style, source=source)
if self.show_legend:
if self.overlay_dims:
legend = ', '.join([d.pprint_value(v, print_unit=True) for d, v in
self.overlay_dims.items()])
else:
legend = element.label
if legend and self.overlaid:
legend_prop = 'legend_label' if bokeh_version >= '1.3.5' else 'legend'
properties[legend_prop] = legend
return properties
def _filter_properties(self, properties, glyph_type, allowed):
glyph_props = dict(properties)
for gtype in ((glyph_type, '') if glyph_type else ('',)):
for prop in ('color', 'alpha'):
glyph_prop = properties.get(gtype+prop)
if glyph_prop and ('line_'+prop not in glyph_props or gtype):
glyph_props['line_'+prop] = glyph_prop
if glyph_prop and ('fill_'+prop not in glyph_props or gtype):
glyph_props['fill_'+prop] = glyph_prop
props = {k[len(gtype):]: v for k, v in glyph_props.items()
if k.startswith(gtype)}
if self.batched:
glyph_props = dict(props, **glyph_props)
else:
glyph_props.update(props)
return {k: v for k, v in glyph_props.items() if k in allowed}
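# Descriptive comment (added, not part of the original source): the method
# below pushes updated properties onto the main glyph and its selection_/
# nonselection_/hover_/muted_ variants, keeps legend item labels in sync, and
# orders ColumnDataSource updates so that any newly referenced dataspec column
# exists before the glyph starts pointing at it.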
def _update_glyph(self, renderer, properties, mapping, glyph, source, data):
allowed_properties = glyph.properties()
properties = mpl_to_bokeh(properties)
merged = dict(properties, **mapping)
legend_props = ('legend_field', 'legend_label') if bokeh_version >= '1.3.5' else ('legend',)
for lp in legend_props:
legend = merged.pop(lp, None)
if legend is not None:
break
columns = list(source.data.keys())
glyph_updates = []
for glyph_type in ('', 'selection_', 'nonselection_', 'hover_', 'muted_'):
if renderer:
glyph = getattr(renderer, glyph_type+'glyph', None)
if not glyph or (not renderer and glyph_type):
continue
filtered = self._filter_properties(merged, glyph_type, allowed_properties)
# Ensure that data is populated before updating glyph
dataspecs = glyph.dataspecs()
for spec in dataspecs:
new_spec = filtered.get(spec)
old_spec = getattr(glyph, spec)
new_field = new_spec.get('field') if isinstance(new_spec, dict) else new_spec
old_field = old_spec.get('field') if isinstance(old_spec, dict) else old_spec
if (data is None) or (new_field not in data or new_field in source.data or new_field == old_field):
continue
columns.append(new_field)
glyph_updates.append((glyph, filtered))
# If a dataspec has changed and the CDS.data will be replaced
# the GlyphRenderer will not find the column, therefore we
# craft an event which will make the column available.
cds_replace = True if data is None else cds_column_replace(source, data)
if not cds_replace:
if not self.static_source:
self._update_datasource(source, data)
if hasattr(self, 'selected') and self.selected is not None:
self._update_selected(source)
elif self.document:
server = self.renderer.mode == 'server'
with hold_policy(self.document, 'collect', server=server):
empty_data = {c: [] for c in columns}
event = ModelChangedEvent(self.document, source, 'data',
source.data, empty_data, empty_data,
setter='empty')
self.document._held_events.append(event)
if legend is not None:
for leg in self.state.legend:
for item in leg.items:
if renderer in item.renderers:
if isinstance(legend, dict):
label = legend
elif lp != 'legend':
prop = 'value' if 'label' in lp else 'field'
label = {prop: legend}
elif isinstance(item.label, dict):
label = {list(item.label)[0]: legend}
else:
label = {'value': legend}
item.label = label
for glyph, update in glyph_updates:
glyph.update(**update)
if data is not None and cds_replace and not self.static_source:
self._update_datasource(source, data)
def _postprocess_hover(self, renderer, source):
"""
Attaches renderer to hover tool and processes tooltips to
ensure datetime data is displayed correctly.
"""
hover = self.handles.get('hover')
if hover is None:
return
if not isinstance(hover.tooltips, util.basestring) and 'hv_created' in hover.tags:
for k, values in source.data.items():
key = '@{%s}' % k
if key in hover.formatters:
continue
if ((isinstance(values, np.ndarray) and values.dtype.kind == 'M') or
(len(values) and isinstance(values[0], util.datetime_types))):
hover.tooltips = [(l, f+'{%F %T}' if f == key else f) for l, f in hover.tooltips]
hover.formatters[key] = "datetime"
if hover.renderers == 'auto':
hover.renderers = []
hover.renderers.append(renderer)
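# Illustrative example (added comment, assuming a datetime column named
# 'date'): a tooltip entry ('Date', '@{date}') would be rewritten above to
# ('Date', '@{date}{%F %T}') and hover.formatters['@{date}'] set to
# "datetime", so the hover tool shows a formatted timestamp rather than a raw
# numeric value.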
def _init_glyphs(self, plot, element, ranges, source):
style_element = element.last if self.batched else element
# Get data and initialize data source
if self.batched:
current_id = tuple(element.traverse(lambda x: x._plot_id, [Element]))
data, mapping, style = self.get_batched_data(element, ranges)
else:
style = self.style[self.cyclic_index]
data, mapping, style = self.get_data(element, ranges, style)
current_id = element._plot_id
with abbreviated_exception():
style = self._apply_transforms(element, data, ranges, style)
if source is None:
source = self._init_datasource(data)
self.handles['previous_id'] = current_id
self.handles['source'] = self.handles['cds'] = source
self.handles['selected'] = source.selected
properties = self._glyph_properties(plot, style_element, source, ranges, style)
if 'legend_label' in properties and 'legend_field' in mapping:
mapping.pop('legend_field')
with abbreviated_exception():
renderer, glyph = self._init_glyph(plot, mapping, properties)
self.handles['glyph'] = glyph
if isinstance(renderer, Renderer):
self.handles['glyph_renderer'] = renderer
self._postprocess_hover(renderer, source)
# Update plot, source and glyph
with abbreviated_exception():
self._update_glyph(renderer, properties, mapping, glyph, source, source.data)
def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
if self.batched:
element = [el for el in self.hmap.data.values() if el][-1]
else:
element = self.hmap.last
key = util.wrap_tuple(self.hmap.last_key)
ranges = self.compute_ranges(self.hmap, key, ranges)
self.current_ranges = ranges
self.current_frame = element
self.current_key = key
style_element = element.last if self.batched else element
ranges = util.match_spec(style_element, ranges)
# Initialize plot, source and glyph
if plot is None:
plot = self._init_plot(key, style_element, ranges=ranges, plots=plots)
self._init_axes(plot)
else:
self.handles['xaxis'] = plot.xaxis[0]
self.handles['x_range'] = plot.x_range
self.handles['yaxis'] = plot.yaxis[0]
self.handles['y_range'] = plot.y_range
self.handles['plot'] = plot
self._init_glyphs(plot, element, ranges, source)
if not self.overlaid:
self._update_plot(key, plot, style_element)
self._update_ranges(style_element, ranges)
for cb in self.callbacks:
cb.initialize()
if self.top_level:
self.init_links()
if not self.overlaid:
self._set_active_tools(plot)
self._process_legend()
self._execute_hooks(element)
self.drawn = True
return plot
def _update_glyphs(self, element, ranges, style):
plot = self.handles['plot']
glyph = self.handles.get('glyph')
source = self.handles['source']
mapping = {}
# Cache frame object id to skip updating data if unchanged
previous_id = self.handles.get('previous_id', None)
if self.batched:
current_id = tuple(element.traverse(lambda x: x._plot_id, [Element]))
else:
current_id = element._plot_id
self.handles['previous_id'] = current_id
self.static_source = (self.dynamic and (current_id == previous_id))
if self.batched:
data, mapping, style = self.get_batched_data(element, ranges)
else:
data, mapping, style = self.get_data(element, ranges, style)
# Include old data if source static
if self.static_source:
for k, v in source.data.items():
if k not in data:
data[k] = v
elif not len(data[k]) and len(source.data):
data[k] = source.data[k]
with abbreviated_exception():
style = self._apply_transforms(element, data, ranges, style)
if glyph:
properties = self._glyph_properties(plot, element, source, ranges, style)
renderer = self.handles.get('glyph_renderer')
with abbreviated_exception():
self._update_glyph(renderer, properties, mapping, glyph, source, data)
elif not self.static_source:
self._update_datasource(source, data)
def _reset_ranges(self):
"""
Resets RangeXY streams if norm option is set to framewise
"""
if self.overlaid:
return
for el, callbacks in self.traverse(lambda x: (x.current_frame, x.callbacks)):
if el is None:
continue
for callback in callbacks:
norm = self.lookup_options(el, 'norm').options
if norm.get('framewise'):
for s in callback.streams:
if isinstance(s, RangeXY) and not s._triggering:
s.reset()
def update_frame(self, key, ranges=None, plot=None, element=None):
"""
Updates an existing plot with data corresponding
to the key.
"""
self._reset_ranges()
reused = isinstance(self.hmap, DynamicMap) and (self.overlaid or self.batched)
if not reused and element is None:
element = self._get_frame(key)
elif element is not None:
self.current_key = key
self.current_frame = element
renderer = self.handles.get('glyph_renderer', None)
glyph = self.handles.get('glyph', None)
visible = element is not None
if hasattr(renderer, 'visible'):
renderer.visible = visible
if hasattr(glyph, 'visible'):
glyph.visible = visible
if ((self.batched and not element) or element is None or (not self.dynamic and self.static) or
(self.streaming and self.streaming[0].data is self.current_frame.data and not self.streaming[0]._triggering)):
return
if self.batched:
style_element = element.last
max_cycles = None
else:
style_element = element
max_cycles = self.style._max_cycles
style = self.lookup_options(style_element, 'style')
self.style = style.max_cycles(max_cycles) if max_cycles else style
if not self.overlaid:
ranges = self.compute_ranges(self.hmap, key, ranges)
else:
self.ranges.update(ranges)
self.param.set_param(**self.lookup_options(style_element, 'plot').options)
ranges = util.match_spec(style_element, ranges)
self.current_ranges = ranges
plot = self.handles['plot']
if not self.overlaid:
self._update_ranges(style_element, ranges)
self._update_plot(key, plot, style_element)
self._set_active_tools(plot)
self._updated = True
if 'hover' in self.handles:
self._update_hover(element)
self._update_glyphs(element, ranges, self.style[self.cyclic_index])
self._execute_hooks(element)
def model_changed(self, model):
"""
Determines if the bokeh model was just changed on the frontend.
Useful to suppress boomeranging events, e.g. when the frontend
just sent an update to the x_range this should not trigger an
update on the backend.
"""
callbacks = [cb for cbs in self.traverse(lambda x: x.callbacks)
for cb in cbs]
stream_metadata = [stream._metadata for cb in callbacks
for stream in cb.streams if stream._metadata]
return any(md['id'] == model.ref['id'] for models in stream_metadata
for md in models.values())
@property
def framewise(self):
"""
Property to determine whether the current frame should have
framewise normalization enabled. Required for bokeh plotting
classes to determine whether to send updated ranges for each
frame.
"""
current_frames = [el for f in self.traverse(lambda x: x.current_frame)
for el in (f.traverse(lambda x: x, [Element])
if f else [])]
current_frames = util.unique_iterator(current_frames)
return any(self.lookup_options(frame, 'norm').options.get('framewise')
for frame in current_frames)
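# Usage note (added comment, assumed user-facing API): framewise normalization
# is requested through the element's 'norm' options, e.g.
#   curve.opts(framewise=True)
# which makes the property above return True and causes updated ranges to be
# computed and sent on every frame.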
class CompositeElementPlot(ElementPlot):
"""
A CompositeElementPlot is an Element plot type that coordinates
drawing of multiple glyphs.
"""
# Mapping between glyph names and style groups
_style_groups = {}
# Defines the order in which glyphs are drawn, referenced by glyph name
_draw_order = []
def _init_glyphs(self, plot, element, ranges, source, data=None, mapping=None, style=None):
# Get data and initialize data source
if None in (data, mapping):
style = self.style[self.cyclic_index]
data, mapping, style = self.get_data(element, ranges, style)
keys = glyph_order(dict(data, **mapping), self._draw_order)
source_cache = {}
current_id = element._plot_id
self.handles['previous_id'] = current_id
for key in keys:
style_group = self._style_groups.get('_'.join(key.split('_')[:-1]))
group_style = dict(style)
ds_data = data.get(key, {})
with abbreviated_exception():
group_style = self._apply_transforms(element, ds_data, ranges, group_style, style_group)
if id(ds_data) in source_cache:
source = source_cache[id(ds_data)]
else:
source = self._init_datasource(ds_data)
source_cache[id(ds_data)] = source
self.handles[key+'_source'] = source
properties = self._glyph_properties(plot, element, source, ranges, group_style, style_group)
properties = self._process_properties(key, properties, mapping.get(key, {}))
with abbreviated_exception():
renderer, glyph = self._init_glyph(plot, mapping.get(key, {}), properties, key)
self.handles[key+'_glyph'] = glyph
if isinstance(renderer, Renderer):
self.handles[key+'_glyph_renderer'] = renderer
self._postprocess_hover(renderer, source)
# Update plot, source and glyph
with abbreviated_exception():
self._update_glyph(renderer, properties, mapping.get(key, {}), glyph,
source, source.data)
if getattr(self, 'colorbar', False):
for k, v in list(self.handles.items()):
if not k.endswith('color_mapper'):
continue
self._draw_colorbar(plot, v, k[:-12])
def _process_properties(self, key, properties, mapping):
key = '_'.join(key.split('_')[:-1]) if '_' in key else key
style_group = self._style_groups[key]
group_props = {}
for k, v in properties.items():
if k in self.style_opts:
group = k.split('_')[0]
if group == style_group:
if k in mapping:
v = mapping[k]
k = '_'.join(k.split('_')[1:])
else:
continue
group_props[k] = v
return group_props
def _update_glyphs(self, element, ranges, style):
plot = self.handles['plot']
# Cache frame object id to skip updating data if unchanged
previous_id = self.handles.get('previous_id', None)
if self.batched:
current_id = tuple(element.traverse(lambda x: x._plot_id, [Element]))
else:
current_id = element._plot_id
self.handles['previous_id'] = current_id
self.static_source = (self.dynamic and (current_id == previous_id))
data, mapping, style = self.get_data(element, ranges, style)
keys = glyph_order(dict(data, **mapping), self._draw_order)
for key in keys:
gdata = data.get(key)
source = self.handles[key+'_source']
glyph = self.handles.get(key+'_glyph')
if glyph:
group_style = dict(style)
style_group = self._style_groups.get('_'.join(key.split('_')[:-1]))
with abbreviated_exception():
group_style = self._apply_transforms(element, gdata, ranges, group_style, style_group)
properties = self._glyph_properties(plot, element, source, ranges, group_style, style_group)
properties = self._process_properties(key, properties, mapping[key])
renderer = self.handles.get(key+'_glyph_renderer')
with abbreviated_exception():
self._update_glyph(renderer, properties, mapping[key],
glyph, source, gdata)
elif not self.static_source and gdata is not None:
self._update_datasource(source, gdata)
def _init_glyph(self, plot, mapping, properties, key):
"""
Returns a Bokeh glyph object.
"""
properties = mpl_to_bokeh(properties)
plot_method = '_'.join(key.split('_')[:-1])
renderer = getattr(plot, plot_method)(**dict(properties, **mapping))
return renderer, renderer.glyph
class ColorbarPlot(ElementPlot):
"""
ColorbarPlot provides methods to create colormappers and colorbar
models which can be added to a glyph. Additionally it provides
parameters to control the position and other styling options of
the colorbar. The default colorbar_position options are defined
by the colorbar_specs, but may be overridden by the colorbar_opts.
"""
colorbar_specs = {'right': {'pos': 'right',
'opts': {'location': (0, 0)}},
'left': {'pos': 'left',
'opts':{'location':(0, 0)}},
'bottom': {'pos': 'below',
'opts': {'location': (0, 0),
'orientation':'horizontal'}},
'top': {'pos': 'above',
'opts': {'location':(0, 0),
'orientation':'horizontal'}},
'top_right': {'pos': 'center',
'opts': {'location': 'top_right'}},
'top_left': {'pos': 'center',
'opts': {'location': 'top_left'}},
'bottom_left': {'pos': 'center',
'opts': {'location': 'bottom_left',
'orientation': 'horizontal'}},
'bottom_right': {'pos': 'center',
'opts': {'location': 'bottom_right',
'orientation': 'horizontal'}}}
color_levels = param.ClassSelector(default=None, class_=(
(int, list) + ((range,) if sys.version_info.major > 2 else ())), doc="""
Number of discrete colors to use when colormapping or a set of color
intervals defining the range of values to map each color to.""")
cformatter = param.ClassSelector(
default=None, class_=(util.basestring, TickFormatter, FunctionType), doc="""
Formatter for ticks along the colorbar axis.""")
clabel = param.String(default=None, doc="""
An explicit override of the color bar label. If set, takes precedence
over the title key in colorbar_opts.""")
clim = param.Tuple(default=(np.nan, np.nan), length=2, doc="""
User-specified colorbar axis range limits for the plot, as a tuple (low,high).
If specified, takes precedence over data and dimension ranges.""")
clim_percentile = param.ClassSelector(default=False, class_=(int, float, bool), doc="""
Percentile value to compute colorscale robust to outliers. If
True, uses 2nd and 98th percentile; otherwise uses the specified
numerical percentile value.""")
cnorm = param.ObjectSelector(default='linear', objects=['linear', 'log', 'eq_hist'], doc="""
Color normalization to be applied during colormapping.""")
colorbar = param.Boolean(default=False, doc="""
Whether to display a colorbar.""")
colorbar_position = param.ObjectSelector(objects=list(colorbar_specs),
default="right", doc="""
Allows selecting between a number of predefined colorbar position
options. The predefined options may be customized in the
colorbar_specs class attribute.""")
colorbar_opts = param.Dict(default={}, doc="""
Allows setting specific styling options for the colorbar overriding
the options defined in the colorbar_specs class attribute. Includes
location, orientation, height, width, scale_alpha, title, title_props,
margin, padding, background_fill_color and more.""")
clipping_colors = param.Dict(default={}, doc="""
Dictionary to specify colors for clipped values, allows
setting color for NaN values and for values above and below
the min and max value. The min, max or NaN color may specify
an RGB(A) color as a color hex string of the form #FFFFFF or
#FFFFFFFF or a length 3 or length 4 tuple specifying values in
the range 0-1 or a named HTML color.""")
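# Illustrative example (added comment): clipping_colors={'min': 'blue',
# 'max': '#FF0000', 'NaN': (0, 0, 0, 0)} colors underflow values blue,
# overflow values red and makes NaN values fully transparent, matching the
# accepted formats listed above.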
logz = param.Boolean(default=False, doc="""
Whether to apply log scaling to the z-axis.""")
symmetric = param.Boolean(default=False, doc="""
Whether to make the colormap symmetric around zero.""")
_colorbar_defaults = dict(bar_line_color='black', label_standoff=8,
major_tick_line_color='black')
_default_nan = '#8b8b8b'
_nonvectorized_styles = base_properties + ['cmap', 'palette']
def _draw_colorbar(self, plot, color_mapper, prefix=''):
if CategoricalColorMapper and isinstance(color_mapper, CategoricalColorMapper):
return
if EqHistColorMapper and isinstance(color_mapper, EqHistColorMapper) and BinnedTicker:
ticker = BinnedTicker(mapper=color_mapper)
elif isinstance(color_mapper, LogColorMapper) and color_mapper.low > 0:
ticker = LogTicker()
else:
ticker = BasicTicker()
cbar_opts = dict(self.colorbar_specs[self.colorbar_position])
# Check if there is a colorbar in the same position
pos = cbar_opts['pos']
if any(isinstance(model, ColorBar) for model in getattr(plot, pos, [])):
return
if self.clabel:
self.colorbar_opts.update({'title': self.clabel})
if self.cformatter is not None:
self.colorbar_opts.update({'formatter': wrap_formatter(self.cformatter, 'c')})
for tk in ['cticks', 'ticks']:
ticksize = self._fontsize(tk, common=False).get('fontsize')
if ticksize is not None:
self.colorbar_opts.update({'major_label_text_font_size': ticksize})
break
for lb in ['clabel', 'labels']:
labelsize = self._fontsize(lb, common=False).get('fontsize')
if labelsize is not None:
self.colorbar_opts.update({'title_text_font_size': labelsize})
break
opts = dict(cbar_opts['opts'], color_mapper=color_mapper, ticker=ticker,
**self._colorbar_defaults)
color_bar = ColorBar(**dict(opts, **self.colorbar_opts))
plot.add_layout(color_bar, pos)
self.handles[prefix+'colorbar'] = color_bar
def _get_colormapper(self, eldim, element, ranges, style, factors=None, colors=None,
group=None, name='color_mapper'):
# The initial colormapper instance is cached the first time
# and then only updated
if eldim is None and colors is None:
return None
dim_name = dim_range_key(eldim)
# Attempt to find matching colormapper on the adjoined plot
if self.adjoined:
cmappers = self.adjoined.traverse(
lambda x: (x.handles.get('color_dim'),
x.handles.get(name),
[v for v in x.handles.values()
if isinstance(v, ColorMapper)])
)
cmappers = [(cmap, mappers) for cdim, cmap, mappers in cmappers
if cdim == eldim]
if cmappers:
cmapper, mappers = cmappers[0]
cmapper = cmapper if cmapper else mappers[0]
self.handles['color_mapper'] = cmapper
return cmapper
else:
return None
ncolors = None if factors is None else len(factors)
if eldim:
# check if there's an actual value (not np.nan)
if all(util.isfinite(cl) for cl in self.clim):
low, high = self.clim
elif dim_name in ranges:
if self.clim_percentile and 'robust' in ranges[dim_name]:
low, high = ranges[dim_name]['robust']
else:
low, high = ranges[dim_name]['combined']
dlow, dhigh = ranges[dim_name]['data']
if (util.is_int(low, int_like=True) and
util.is_int(high, int_like=True) and
util.is_int(dlow) and
util.is_int(dhigh)):
low, high = int(low), int(high)
elif isinstance(eldim, dim):
low, high = np.nan, np.nan
else:
low, high = element.range(eldim.name)
if self.symmetric:
sym_max = max(abs(low), high)
low, high = -sym_max, sym_max
low = self.clim[0] if util.isfinite(self.clim[0]) else low
high = self.clim[1] if util.isfinite(self.clim[1]) else high
else:
low, high = None, None
prefix = '' if group is None else group+'_'
cmap = colors or style.get(prefix+'cmap', style.get('cmap', 'viridis'))
nan_colors = {k: rgba_tuple(v) for k, v in self.clipping_colors.items()}
if isinstance(cmap, dict):
factors = list(cmap)
palette = [cmap.get(f, nan_colors.get('NaN', self._default_nan)) for f in factors]
if isinstance(eldim, dim):
if eldim.dimension in element:
formatter = element.get_dimension(eldim.dimension).pprint_value
else:
formatter = str
else:
formatter = eldim.pprint_value
factors = [formatter(f) for f in factors]
else:
categorical = ncolors is not None
if isinstance(self.color_levels, int):
ncolors = self.color_levels
elif isinstance(self.color_levels, list):
ncolors = len(self.color_levels) - 1
if isinstance(cmap, list) and len(cmap) != ncolors:
raise ValueError('The number of colors in the colormap '
'must match the intervals defined in the '
'color_levels, expected %d colors found %d.'
% (ncolors, len(cmap)))
palette = process_cmap(cmap, ncolors, categorical=categorical)
if isinstance(self.color_levels, list):
palette, (low, high) = color_intervals(palette, self.color_levels, clip=(low, high))
colormapper, opts = self._get_cmapper_opts(low, high, factors, nan_colors)
cmapper = self.handles.get(name)
if cmapper is not None:
if cmapper.palette != palette:
cmapper.palette = palette
opts = {k: opt for k, opt in opts.items()
if getattr(cmapper, k) != opt}
if opts:
cmapper.update(**opts)
else:
cmapper = colormapper(palette=palette, **opts)
self.handles[name] = cmapper
self.handles['color_dim'] = eldim
return cmapper
def _get_color_data(self, element, ranges, style, name='color', factors=None, colors=None,
int_categories=False):
data, mapping = {}, {}
cdim = element.get_dimension(self.color_index)
color = style.get(name, None)
if cdim and ((isinstance(color, util.basestring) and color in element) or isinstance(color, dim)):
self.param.warning(
"Cannot declare style mapping for '%s' option and "
"declare a color_index; ignoring the color_index."
% name)
cdim = None
if not cdim:
return data, mapping
cdata = element.dimension_values(cdim)
field = util.dimension_sanitizer(cdim.name)
dtypes = 'iOSU' if int_categories else 'OSU'
if factors is None and (isinstance(cdata, list) or cdata.dtype.kind in dtypes):
range_key = dim_range_key(cdim)
if range_key in ranges and 'factors' in ranges[range_key]:
factors = ranges[range_key]['factors']
else:
factors = util.unique_array(cdata)
if factors is not None and int_categories and cdata.dtype.kind == 'i':
field += '_str__'
cdata = [str(f) for f in cdata]
factors = [str(f) for f in factors]
mapper = self._get_colormapper(cdim, element, ranges, style,
factors, colors)
if factors is None and isinstance(mapper, CategoricalColorMapper):
field += '_str__'
cdata = [cdim.pprint_value(c) for c in cdata]
factors = True
data[field] = cdata
if factors is not None and self.show_legend:
legend_prop = 'legend_field' if bokeh_version >= '1.3.5' else 'legend'
mapping[legend_prop] = field
mapping[name] = {'field': field, 'transform': mapper}
return data, mapping
def _get_cmapper_opts(self, low, high, factors, colors):
if factors is None:
if self.cnorm == 'linear':
colormapper = LinearColorMapper
if self.cnorm == 'log' or self.logz:
colormapper = LogColorMapper
if util.is_int(low) and util.is_int(high) and low == 0:
low = 1
if 'min' not in colors:
# Make integer 0 be transparent
colors['min'] = 'rgba(0, 0, 0, 0)'
elif util.is_number(low) and low <= 0:
self.param.warning(
"Log color mapper lower bound <= 0 and will not "
"render corrrectly. Ensure you set a positive "
"lower bound on the color dimension or using "
"the `clim` option."
)
elif self.cnorm == 'eq_hist':
if EqHistColorMapper is None:
raise ImportError("Could not import bokeh.models.EqHistColorMapper. "
"Note that the option cnorm='eq_hist' requires "
"bokeh 2.2.3 or higher.")
colormapper = EqHistColorMapper
if isinstance(low, (bool, np.bool_)): low = int(low)
if isinstance(high, (bool, np.bool_)): high = int(high)
# Pad zero-range to avoid breaking colorbar (as of bokeh 1.0.4)
if low == high:
offset = self.default_span / 2
low -= offset
high += offset
opts = {}
if util.isfinite(low):
opts['low'] = low
if util.isfinite(high):
opts['high'] = high
color_opts = [('NaN', 'nan_color'), ('max', 'high_color'), ('min', 'low_color')]
opts.update({opt: colors[name] for name, opt in color_opts if name in colors})
else:
colormapper = CategoricalColorMapper
factors = decode_bytes(factors)
opts = dict(factors=list(factors))
if 'NaN' in colors:
opts['nan_color'] = colors['NaN']
return colormapper, opts
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object and optionally creates a colorbar.
"""
ret = super(ColorbarPlot, self)._init_glyph(plot, mapping, properties)
if self.colorbar:
for k, v in list(self.handles.items()):
if not k.endswith('color_mapper'):
continue
self._draw_colorbar(plot, v, k[:-12])
return ret
class LegendPlot(ElementPlot):
legend_position = param.ObjectSelector(objects=["top_right",
"top_left",
"bottom_left",
"bottom_right",
'right', 'left',
'top', 'bottom'],
default="top_right",
doc="""
Allows selecting between a number of predefined legend position
options. The predefined options may be customized in the
legend_specs class attribute.""")
legend_muted = param.Boolean(default=False, doc="""
Controls whether the legend entries are muted by default.""")
legend_offset = param.NumericTuple(default=(0, 0), doc="""
If legend is placed outside the axis, this determines the
(width, height) offset in pixels from the original position.""")
legend_cols = param.Integer(default=False, doc="""
Whether to lay out the legend as columns.""")
legend_specs = {'right': 'right', 'left': 'left', 'top': 'above',
'bottom': 'below'}
legend_opts = param.Dict(default={}, doc="""
Allows setting specific styling options for the legend.""")
def _process_legend(self, plot=None):
plot = plot or self.handles['plot']
if not plot.legend:
return
legend = plot.legend[0]
cmappers = [cmapper for cmapper in self.handles.values()
if isinstance(cmapper, CategoricalColorMapper)]
categorical = bool(cmappers)
if ((not categorical and not self.overlaid and len(legend.items) == 1)
or not self.show_legend):
legend.items[:] = []
else:
plot.legend.orientation = 'horizontal' if self.legend_cols else 'vertical'
pos = self.legend_position
if pos in self.legend_specs:
plot.legend[:] = []
legend.location = self.legend_offset
if pos in ['top', 'bottom']:
plot.legend.orientation = 'horizontal'
plot.add_layout(legend, self.legend_specs[pos])
else:
legend.location = pos
# Apply muting and misc legend opts
for leg in plot.legend:
leg.update(**self.legend_opts)
for item in leg.items:
for r in item.renderers:
r.muted = self.legend_muted
class AnnotationPlot(object):
"""
Mix-in plotting subclass for AnnotationPlots which do not have a legend.
"""
class OverlayPlot(GenericOverlayPlot, LegendPlot):
tabs = param.Boolean(default=False, doc="""
Whether to display overlaid plots in separate panes""")
style_opts = (legend_dimensions + ['border_'+p for p in line_properties] +
text_properties + ['background_fill_color', 'background_fill_alpha'])
multiple_legends = param.Boolean(default=False, doc="""
Whether to split the legend for subplots into multiple legends.""")
_propagate_options = ['width', 'height', 'xaxis', 'yaxis', 'labelled',
'bgcolor', 'fontsize', 'invert_axes', 'show_frame',
'show_grid', 'logx', 'logy', 'xticks', 'toolbar',
'yticks', 'xrotation', 'yrotation', 'lod',
'border', 'invert_xaxis', 'invert_yaxis', 'sizing_mode',
'title', 'title_format', 'legend_position', 'legend_offset',
'legend_cols', 'gridstyle', 'legend_muted', 'padding',
'xlabel', 'ylabel', 'xlim', 'ylim', 'zlim',
'xformatter', 'yformatter', 'active_tools',
'min_height', 'max_height', 'min_width', 'max_width',
'margin', 'aspect', 'data_aspect', 'frame_width',
'frame_height', 'responsive', 'fontscale']
@property
def _x_range_type(self):
for v in self.subplots.values():
if not isinstance(v._x_range_type, Range1d):
return v._x_range_type
return self._x_range_type
@property
def _y_range_type(self):
for v in self.subplots.values():
if not isinstance(v._y_range_type, Range1d):
return v._y_range_type
return self._y_range_type
def _process_legend(self, overlay):
plot = self.handles['plot']
subplots = self.traverse(lambda x: x, [lambda x: x is not self])
legend_plots = any(p is not None for p in subplots
if isinstance(p, LegendPlot) and
not isinstance(p, OverlayPlot))
non_annotation = [p for p in subplots if not
(isinstance(p, OverlayPlot) or isinstance(p, AnnotationPlot))]
if (not self.show_legend or len(plot.legend) == 0 or
(len(non_annotation) <= 1 and not (self.dynamic or legend_plots))):
return super(OverlayPlot, self)._process_legend()
elif not plot.legend:
return
legend = plot.legend[0]
options = {}
properties = self.lookup_options(self.hmap.last, 'style')[self.cyclic_index]
for k, v in properties.items():
if k in line_properties and 'line' not in k:
ksplit = k.split('_')
k = '_'.join(ksplit[:1]+['line']+ksplit[1:])
if k in text_properties:
k = 'label_' + k
if k.startswith('legend_'):
k = k[7:]
options[k] = v
pos = self.legend_position
orientation = 'horizontal' if self.legend_cols else 'vertical'
if pos in ['top', 'bottom']:
orientation = 'horizontal'
options['orientation'] = orientation
if overlay is not None and overlay.kdims:
title = ', '.join([d.label for d in overlay.kdims])
options['title'] = title
options.update(self._fontsize('legend', 'label_text_font_size'))
options.update(self._fontsize('legend_title', 'title_text_font_size'))
legend.update(**options)
if pos in self.legend_specs:
pos = self.legend_specs[pos]
else:
legend.location = pos
if 'legend_items' not in self.handles:
self.handles['legend_items'] = []
legend_items = self.handles['legend_items']
legend_labels = {tuple(sorted(i.label.items())) if isinstance(i.label, dict) else i.label: i
for i in legend_items}
for item in legend.items:
label = tuple(sorted(item.label.items())) if isinstance(item.label, dict) else item.label
if not label or (isinstance(item.label, dict) and not item.label.get('value', True)):
continue
if label in legend_labels:
prev_item = legend_labels[label]
prev_item.renderers[:] = list(util.unique_iterator(prev_item.renderers+item.renderers))
else:
legend_labels[label] = item
legend_items.append(item)
if item not in self.handles['legend_items']:
self.handles['legend_items'].append(item)
# Ensure that each renderer is only singly referenced by a legend item
filtered = []
renderers = []
for item in legend_items:
item.renderers[:] = [r for r in item.renderers if r not in renderers]
if item in filtered or not item.renderers or not any(r.visible for r in item.renderers):
continue
renderers += item.renderers
filtered.append(item)
legend.items[:] = list(util.unique_iterator(filtered))
if self.multiple_legends:
remove_legend(plot, legend)
properties = legend.properties_with_values(include_defaults=False)
legend_group = []
for item in legend.items:
if not isinstance(item.label, dict) or 'value' in item.label:
legend_group.append(item)
continue
new_legend = Legend(**dict(properties, items=[item]))
new_legend.location = self.legend_offset
plot.add_layout(new_legend, pos)
if legend_group:
new_legend = Legend(**dict(properties, items=legend_group))
new_legend.location = self.legend_offset
plot.add_layout(new_legend, pos)
legend.items[:] = []
elif pos in ['above', 'below', 'right', 'left']:
remove_legend(plot, legend)
legend.location = self.legend_offset
plot.add_layout(legend, pos)
# Apply muting and misc legend opts
for leg in plot.legend:
leg.update(**self.legend_opts)
for item in leg.items:
for r in item.renderers:
r.muted = self.legend_muted
def _init_tools(self, element, callbacks=[]):
"""
Processes the list of tools to be supplied to the plot.
"""
hover_tools = {}
init_tools, tool_types = [], []
for key, subplot in self.subplots.items():
el = element.get(key)
if el is not None:
el_tools = subplot._init_tools(el, self.callbacks)
for tool in el_tools:
if isinstance(tool, util.basestring):
tool_type = TOOL_TYPES.get(tool)
else:
tool_type = type(tool)
if isinstance(tool, tools.HoverTool):
tooltips = tuple(tool.tooltips) if tool.tooltips else ()
if tooltips in hover_tools:
continue
else:
hover_tools[tooltips] = tool
elif tool_type in tool_types:
continue
else:
tool_types.append(tool_type)
init_tools.append(tool)
self.handles['hover_tools'] = hover_tools
return init_tools
def _merge_tools(self, subplot):
"""
Merges tools on the overlay with those on the subplots.
"""
if self.batched and 'hover' in subplot.handles:
self.handles['hover'] = subplot.handles['hover']
elif 'hover' in subplot.handles and 'hover_tools' in self.handles:
hover = subplot.handles['hover']
if hover.tooltips and not isinstance(hover.tooltips, util.basestring):
tooltips = tuple((name, spec.replace('{%F %T}', '')) for name, spec in hover.tooltips)
else:
tooltips = ()
tool = self.handles['hover_tools'].get(tooltips)
if tool:
tool_renderers = [] if tool.renderers == 'auto' else tool.renderers
hover_renderers = [] if hover.renderers == 'auto' else hover.renderers
renderers = tool_renderers + hover_renderers
tool.renderers = list(util.unique_iterator(renderers))
if 'hover' not in self.handles:
self.handles['hover'] = tool
def _get_factors(self, overlay, ranges):
xfactors, yfactors = [], []
for k, sp in self.subplots.items():
el = overlay.data.get(k)
if el is not None:
elranges = util.match_spec(el, ranges)
xfs, yfs = sp._get_factors(el, elranges)
xfactors.append(xfs)
yfactors.append(yfs)
if xfactors:
xfactors = np.concatenate(xfactors)
if yfactors:
yfactors = np.concatenate(yfactors)
return util.unique_array(xfactors), util.unique_array(yfactors)
def _get_axis_dims(self, element):
subplots = list(self.subplots.values())
if subplots:
return subplots[0]._get_axis_dims(element)
return super(OverlayPlot, self)._get_axis_dims(element)
def initialize_plot(self, ranges=None, plot=None, plots=None):
key = util.wrap_tuple(self.hmap.last_key)
nonempty = [(k, el) for k, el in self.hmap.data.items() if el]
if not nonempty:
raise SkipRendering('All Overlays empty, cannot initialize plot.')
dkey, element = nonempty[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
self.tabs = self.tabs or any(isinstance(sp, TablePlot) for sp in self.subplots.values())
if plot is None and not self.tabs and not self.batched:
plot = self._init_plot(key, element, ranges=ranges, plots=plots)
self._init_axes(plot)
self.handles['plot'] = plot
if plot and not self.overlaid:
self._update_plot(key, plot, element)
self._update_ranges(element, ranges)
panels = []
for key, subplot in self.subplots.items():
frame = None
if self.tabs:
subplot.overlaid = False
child = subplot.initialize_plot(ranges, plot, plots)
if isinstance(element, CompositeOverlay):
# Ensure that all subplots are in the same state
frame = element.get(key, None)
subplot.current_frame = frame
subplot.current_key = dkey
if self.batched:
self.handles['plot'] = child
if self.tabs:
title = subplot._format_title(key, dimensions=False)
if not title:
title = get_tab_title(key, frame, self.hmap.last)
panels.append(Panel(child=child, title=title))
self._merge_tools(subplot)
if self.tabs:
self.handles['plot'] = Tabs(
tabs=panels, width=self.width, height=self.height,
min_width=self.min_width, min_height=self.min_height,
max_width=self.max_width, max_height=self.max_height,
sizing_mode='fixed'
)
elif not self.overlaid:
self._process_legend(element)
self._set_active_tools(plot)
self.drawn = True
self.handles['plots'] = plots
self._update_callbacks(self.handles['plot'])
if 'plot' in self.handles and not self.tabs:
plot = self.handles['plot']
self.handles['xaxis'] = plot.xaxis[0]
self.handles['yaxis'] = plot.yaxis[0]
self.handles['x_range'] = plot.x_range
self.handles['y_range'] = plot.y_range
for cb in self.callbacks:
cb.initialize()
if self.top_level:
self.init_links()
self._execute_hooks(element)
return self.handles['plot']
def update_frame(self, key, ranges=None, element=None):
"""
Update the internal state of the Plot to represent the given
key tuple (where integers represent frames). Returns this
state.
"""
self._reset_ranges()
reused = isinstance(self.hmap, DynamicMap) and self.overlaid
if not reused and element is None:
element = self._get_frame(key)
elif element is not None:
self.current_frame = element
self.current_key = key
items = [] if element is None else list(element.data.items())
if isinstance(self.hmap, DynamicMap):
range_obj = element
else:
range_obj = self.hmap
if element is not None:
ranges = self.compute_ranges(range_obj, key, ranges)
# Update plot options
plot_opts = self.lookup_options(element, 'plot').options
inherited = self._traverse_options(element, 'plot',
self._propagate_options,
defaults=False)
plot_opts.update(**{k: v[0] for k, v in inherited.items() if k not in plot_opts})
self.param.set_param(**plot_opts)
if not self.overlaid and not self.tabs and not self.batched:
self._update_ranges(element, ranges)
# Determine which stream (if any) triggered the update
triggering = [stream for stream in self.streams if stream._triggering]
for k, subplot in self.subplots.items():
el = None
# If in Dynamic mode propagate elements to subplots
if isinstance(self.hmap, DynamicMap) and element:
# In batched mode NdOverlay is passed to subplot directly
if self.batched:
el = element
# If not batched get the Element matching the subplot
elif element is not None:
idx, spec, exact = self._match_subplot(k, subplot, items, element)
if idx is not None:
_, el = items.pop(idx)
if not exact:
self._update_subplot(subplot, spec)
# Skip updates to subplots when its streams is not one of
# the streams that initiated the update
if (triggering and all(s not in triggering for s in subplot.streams) and
not subplot in self.dynamic_subplots):
continue
subplot.update_frame(key, ranges, element=el)
if not self.batched and isinstance(self.hmap, DynamicMap) and items:
init_kwargs = {'plots': self.handles['plots']}
if not self.tabs:
init_kwargs['plot'] = self.handles['plot']
self._create_dynamic_subplots(key, items, ranges, **init_kwargs)
if not self.overlaid and not self.tabs:
self._process_legend(element)
if element and not self.overlaid and not self.tabs and not self.batched:
plot = self.handles['plot']
self._update_plot(key, plot, element)
self._set_active_tools(plot)
self._updated = True
self._process_legend(element)
self._execute_hooks(element)
| 1 | 24,190 | SyntaxError I think | holoviz-holoviews | py |
@@ -5012,6 +5012,9 @@ TEST_F(VkLayerTest, ValidateGeometryNV) {
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
+#if 0
+ // XXX Subtest disabled because this is the wrong VUID.
+ // No VUIDs currently exist to require memory is bound (spec bug).
// Invalid vertex buffer - no memory bound.
{
VkGeometryNV geometry = valid_geometry_triangles; | 1 | /*
* Copyright (c) 2015-2020 The Khronos Group Inc.
* Copyright (c) 2015-2020 Valve Corporation
* Copyright (c) 2015-2020 LunarG, Inc.
* Copyright (c) 2015-2020 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Courtney Goeltzenleuchter <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Mike Stroyan <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Tony Barbour <[email protected]>
* Author: Cody Northrop <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Jeremy Kniager <[email protected]>
* Author: Shannon McPherson <[email protected]>
* Author: John Zulauf <[email protected]>
*/
#include "cast_utils.h"
#include "layer_validation_tests.h"
TEST_F(VkLayerTest, RequiredParameter) {
TEST_DESCRIPTION("Specify VK_NULL_HANDLE, NULL, and 0 for required handle, pointer, array, and array count parameters");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "required parameter pFeatures specified as NULL");
// Specify NULL for a pointer to a handle
// Expected to trigger an error with
// parameter_validation::validate_required_pointer
vk::GetPhysicalDeviceFeatures(gpu(), NULL);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "required parameter pQueueFamilyPropertyCount specified as NULL");
// Specify NULL for pointer to array count
// Expected to trigger an error with parameter_validation::validate_array
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), NULL, NULL);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewport-viewportCount-arraylength");
// Specify 0 for a required array count
// Expected to trigger an error with parameter_validation::validate_array
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
m_commandBuffer->SetViewport(0, 0, &viewport);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCreateImage-pCreateInfo-parameter");
// Specify a null pImageCreateInfo struct pointer
VkImage test_image;
vk::CreateImage(device(), NULL, NULL, &test_image);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewport-pViewports-parameter");
// Specify NULL for a required array
// Expected to trigger an error with parameter_validation::validate_array
m_commandBuffer->SetViewport(0, 1, NULL);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "required parameter memory specified as VK_NULL_HANDLE");
// Specify VK_NULL_HANDLE for a required handle
// Expected to trigger an error with
// parameter_validation::validate_required_handle
vk::UnmapMemory(device(), VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "required parameter pFences[0] specified as VK_NULL_HANDLE");
// Specify VK_NULL_HANDLE for a required handle array entry
// Expected to trigger an error with
// parameter_validation::validate_required_handle_array
VkFence fence = VK_NULL_HANDLE;
vk::ResetFences(device(), 1, &fence);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "required parameter pAllocateInfo specified as NULL");
// Specify NULL for a required struct pointer
// Expected to trigger an error with
// parameter_validation::validate_struct_type
VkDeviceMemory memory = VK_NULL_HANDLE;
vk::AllocateMemory(device(), NULL, NULL, &memory);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "value of faceMask must not be 0");
// Specify 0 for a required VkFlags parameter
// Expected to trigger an error with parameter_validation::validate_flags
m_commandBuffer->SetStencilReference(0, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "value of pSubmits[0].pWaitDstStageMask[0] must not be 0");
// Specify 0 for a required VkFlags array entry
// Expected to trigger an error with
// parameter_validation::validate_flags_array
VkSemaphore semaphore = VK_NULL_HANDLE;
VkPipelineStageFlags stageFlags = 0;
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = &semaphore;
submitInfo.pWaitDstStageMask = &stageFlags;
vk::QueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-sType-sType");
stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
// Set a bogus sType and see what happens
submitInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = &semaphore;
submitInfo.pWaitDstStageMask = &stageFlags;
vk::QueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pWaitSemaphores-parameter");
stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.waitSemaphoreCount = 1;
// Set a null pointer for pWaitSemaphores
submitInfo.pWaitSemaphores = NULL;
submitInfo.pWaitDstStageMask = &stageFlags;
vk::QueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCreateRenderPass-pCreateInfo-parameter");
VkRenderPass render_pass;
vk::CreateRenderPass(device(), nullptr, nullptr, &render_pass);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, PnextOnlyStructValidation) {
TEST_DESCRIPTION("See if checks occur on structs ONLY used in pnext chains.");
if (!(CheckDescriptorIndexingSupportAndInitFramework(this, m_instance_extension_names, m_device_extension_names, NULL,
m_errorMonitor))) {
printf("Descriptor indexing or one of its dependencies not supported, skipping tests\n");
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
// Create a device passing in a bad PdevFeatures2 value
auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
// Set one of the features values to an invalid boolean value
indexing_features.descriptorBindingUniformBufferUpdateAfterBind = 800;
uint32_t queue_node_count;
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_node_count, NULL);
VkQueueFamilyProperties *queue_props = new VkQueueFamilyProperties[queue_node_count];
vk::GetPhysicalDeviceQueueFamilyProperties(gpu(), &queue_node_count, queue_props);
float priorities[] = {1.0f};
VkDeviceQueueCreateInfo queue_info{};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.pNext = NULL;
queue_info.flags = 0;
queue_info.queueFamilyIndex = 0;
queue_info.queueCount = 1;
queue_info.pQueuePriorities = &priorities[0];
VkDeviceCreateInfo dev_info = {};
dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
dev_info.pNext = NULL;
dev_info.queueCreateInfoCount = 1;
dev_info.pQueueCreateInfos = &queue_info;
dev_info.enabledLayerCount = 0;
dev_info.ppEnabledLayerNames = NULL;
dev_info.enabledExtensionCount = m_device_extension_names.size();
dev_info.ppEnabledExtensionNames = m_device_extension_names.data();
dev_info.pNext = &features2;
VkDevice dev;
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "is neither VK_TRUE nor VK_FALSE");
m_errorMonitor->SetUnexpectedError("Failed to create");
vk::CreateDevice(gpu(), &dev_info, NULL, &dev);
    m_errorMonitor->VerifyFound();
    // Free the queue family properties array allocated with new[] above
    delete[] queue_props;
}
TEST_F(VkLayerTest, ReservedParameter) {
TEST_DESCRIPTION("Specify a non-zero value for a reserved parameter");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, " must be 0");
    // Specify a non-zero value for a reserved VkFlags parameter
// Expected to trigger an error with
// parameter_validation::validate_reserved_flags
VkEvent event_handle = VK_NULL_HANDLE;
VkEventCreateInfo event_info = {};
event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
event_info.flags = 1;
vk::CreateEvent(device(), &event_info, NULL, &event_handle);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DebugMarkerNameTest) {
TEST_DESCRIPTION("Ensure debug marker object names are printed in debug report output");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), kValidationLayerName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_DEBUG_MARKER_EXTENSION_NAME);
} else {
printf("%s Debug Marker Extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkDebugMarkerSetObjectNameEXT fpvkDebugMarkerSetObjectNameEXT =
(PFN_vkDebugMarkerSetObjectNameEXT)vk::GetInstanceProcAddr(instance(), "vkDebugMarkerSetObjectNameEXT");
if (!(fpvkDebugMarkerSetObjectNameEXT)) {
printf("%s Can't find fpvkDebugMarkerSetObjectNameEXT; skipped.\n", kSkipPrefix);
return;
}
if (DeviceSimulation()) {
printf("%sSkipping object naming test.\n", kSkipPrefix);
return;
}
VkBuffer buffer;
VkDeviceMemory memory_1, memory_2;
std::string memory_name = "memory_name";
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffer_create_info.size = 1;
vk::CreateBuffer(device(), &buffer_create_info, nullptr, &buffer);
VkMemoryRequirements memRequirements;
vk::GetBufferMemoryRequirements(device(), buffer, &memRequirements);
VkMemoryAllocateInfo memory_allocate_info = {};
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.allocationSize = memRequirements.size;
memory_allocate_info.memoryTypeIndex = 0;
vk::AllocateMemory(device(), &memory_allocate_info, nullptr, &memory_1);
vk::AllocateMemory(device(), &memory_allocate_info, nullptr, &memory_2);
VkDebugMarkerObjectNameInfoEXT name_info = {};
name_info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT;
name_info.pNext = nullptr;
name_info.object = (uint64_t)memory_2;
name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT;
name_info.pObjectName = memory_name.c_str();
fpvkDebugMarkerSetObjectNameEXT(device(), &name_info);
vk::BindBufferMemory(device(), buffer, memory_1, 0);
// Test core_validation layer
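    // Binding the buffer to memory_2 while it is already bound to memory_1 is invalid;
    // the resulting error text should contain memory_2's debug marker name set above.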
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, memory_name);
vk::BindBufferMemory(device(), buffer, memory_2, 0);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), memory_1, nullptr);
memory_1 = VK_NULL_HANDLE;
vk::FreeMemory(device(), memory_2, nullptr);
memory_2 = VK_NULL_HANDLE;
vk::DestroyBuffer(device(), buffer, nullptr);
buffer = VK_NULL_HANDLE;
VkCommandBuffer commandBuffer;
std::string commandBuffer_name = "command_buffer_name";
VkCommandPool commandpool_1;
VkCommandPool commandpool_2;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vk::CreateCommandPool(device(), &pool_create_info, nullptr, &commandpool_1);
vk::CreateCommandPool(device(), &pool_create_info, nullptr, &commandpool_2);
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = commandpool_1;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vk::AllocateCommandBuffers(device(), &command_buffer_allocate_info, &commandBuffer);
name_info.object = (uint64_t)commandBuffer;
name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT;
name_info.pObjectName = commandBuffer_name.c_str();
fpvkDebugMarkerSetObjectNameEXT(device(), &name_info);
VkCommandBufferBeginInfo cb_begin_Info = {};
cb_begin_Info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cb_begin_Info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vk::BeginCommandBuffer(commandBuffer, &cb_begin_Info);
const VkRect2D scissor = {{-1, 0}, {16, 16}};
const VkRect2D scissors[] = {scissor, scissor};
// Test parameter_validation layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, commandBuffer_name);
vk::CmdSetScissor(commandBuffer, 1, 1, scissors);
m_errorMonitor->VerifyFound();
// Test object_tracker layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, commandBuffer_name);
vk::FreeCommandBuffers(device(), commandpool_2, 1, &commandBuffer);
m_errorMonitor->VerifyFound();
vk::DestroyCommandPool(device(), commandpool_1, NULL);
vk::DestroyCommandPool(device(), commandpool_2, NULL);
}
TEST_F(VkLayerTest, DebugUtilsNameTest) {
TEST_DESCRIPTION("Ensure debug utils object names are printed in debug messenger output");
// Skip test if extension not supported
if (InstanceExtensionSupported(VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
} else {
printf("%s Debug Utils Extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkSetDebugUtilsObjectNameEXT fpvkSetDebugUtilsObjectNameEXT =
(PFN_vkSetDebugUtilsObjectNameEXT)vk::GetInstanceProcAddr(instance(), "vkSetDebugUtilsObjectNameEXT");
ASSERT_TRUE(fpvkSetDebugUtilsObjectNameEXT); // Must be extant if extension is enabled
PFN_vkCreateDebugUtilsMessengerEXT fpvkCreateDebugUtilsMessengerEXT =
(PFN_vkCreateDebugUtilsMessengerEXT)vk::GetInstanceProcAddr(instance(), "vkCreateDebugUtilsMessengerEXT");
ASSERT_TRUE(fpvkCreateDebugUtilsMessengerEXT); // Must be extant if extension is enabled
PFN_vkDestroyDebugUtilsMessengerEXT fpvkDestroyDebugUtilsMessengerEXT =
(PFN_vkDestroyDebugUtilsMessengerEXT)vk::GetInstanceProcAddr(instance(), "vkDestroyDebugUtilsMessengerEXT");
ASSERT_TRUE(fpvkDestroyDebugUtilsMessengerEXT); // Must be extant if extension is enabled
PFN_vkCmdInsertDebugUtilsLabelEXT fpvkCmdInsertDebugUtilsLabelEXT =
(PFN_vkCmdInsertDebugUtilsLabelEXT)vk::GetInstanceProcAddr(instance(), "vkCmdInsertDebugUtilsLabelEXT");
ASSERT_TRUE(fpvkCmdInsertDebugUtilsLabelEXT); // Must be extant if extension is enabled
if (DeviceSimulation()) {
printf("%sSkipping object naming test.\n", kSkipPrefix);
return;
}
DebugUtilsLabelCheckData callback_data;
auto empty_callback = [](const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData, DebugUtilsLabelCheckData *data) {
data->count++;
};
callback_data.count = 0;
callback_data.callback = empty_callback;
auto callback_create_info = lvl_init_struct<VkDebugUtilsMessengerCreateInfoEXT>();
callback_create_info.messageSeverity =
VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
callback_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
callback_create_info.pfnUserCallback = DebugUtilsCallback;
callback_create_info.pUserData = &callback_data;
VkDebugUtilsMessengerEXT my_messenger = VK_NULL_HANDLE;
fpvkCreateDebugUtilsMessengerEXT(instance(), &callback_create_info, nullptr, &my_messenger);
VkBuffer buffer;
VkDeviceMemory memory_1, memory_2;
std::string memory_name = "memory_name";
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffer_create_info.size = 1;
vk::CreateBuffer(device(), &buffer_create_info, nullptr, &buffer);
VkMemoryRequirements memRequirements;
vk::GetBufferMemoryRequirements(device(), buffer, &memRequirements);
VkMemoryAllocateInfo memory_allocate_info = {};
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.allocationSize = memRequirements.size;
memory_allocate_info.memoryTypeIndex = 0;
vk::AllocateMemory(device(), &memory_allocate_info, nullptr, &memory_1);
vk::AllocateMemory(device(), &memory_allocate_info, nullptr, &memory_2);
VkDebugUtilsObjectNameInfoEXT name_info = {};
name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
name_info.pNext = nullptr;
name_info.objectType = VK_OBJECT_TYPE_DEVICE_MEMORY;
name_info.pObjectName = memory_name.c_str();
// Pass in bad handle make sure ObjectTracker catches it
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDebugUtilsObjectNameInfoEXT-objectType-02590");
name_info.objectHandle = (uint64_t)0xcadecade;
fpvkSetDebugUtilsObjectNameEXT(device(), &name_info);
m_errorMonitor->VerifyFound();
// Pass in 'unknown' object type and see if parameter validation catches it
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDebugUtilsObjectNameInfoEXT-objectType-02589");
name_info.objectHandle = (uint64_t)memory_2;
name_info.objectType = VK_OBJECT_TYPE_UNKNOWN;
fpvkSetDebugUtilsObjectNameEXT(device(), &name_info);
m_errorMonitor->VerifyFound();
name_info.objectType = VK_OBJECT_TYPE_DEVICE_MEMORY;
fpvkSetDebugUtilsObjectNameEXT(device(), &name_info);
vk::BindBufferMemory(device(), buffer, memory_1, 0);
// Test core_validation layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, memory_name);
vk::BindBufferMemory(device(), buffer, memory_2, 0);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), memory_1, nullptr);
memory_1 = VK_NULL_HANDLE;
vk::FreeMemory(device(), memory_2, nullptr);
memory_2 = VK_NULL_HANDLE;
vk::DestroyBuffer(device(), buffer, nullptr);
buffer = VK_NULL_HANDLE;
VkCommandBuffer commandBuffer;
std::string commandBuffer_name = "command_buffer_name";
VkCommandPool commandpool_1;
VkCommandPool commandpool_2;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vk::CreateCommandPool(device(), &pool_create_info, nullptr, &commandpool_1);
vk::CreateCommandPool(device(), &pool_create_info, nullptr, &commandpool_2);
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = commandpool_1;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vk::AllocateCommandBuffers(device(), &command_buffer_allocate_info, &commandBuffer);
name_info.objectHandle = (uint64_t)commandBuffer;
name_info.objectType = VK_OBJECT_TYPE_COMMAND_BUFFER;
name_info.pObjectName = commandBuffer_name.c_str();
fpvkSetDebugUtilsObjectNameEXT(device(), &name_info);
VkCommandBufferBeginInfo cb_begin_Info = {};
cb_begin_Info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cb_begin_Info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vk::BeginCommandBuffer(commandBuffer, &cb_begin_Info);
const VkRect2D scissor = {{-1, 0}, {16, 16}};
const VkRect2D scissors[] = {scissor, scissor};
auto command_label = lvl_init_struct<VkDebugUtilsLabelEXT>();
command_label.pLabelName = "Command Label 0123";
command_label.color[0] = 0.;
command_label.color[1] = 1.;
command_label.color[2] = 2.;
command_label.color[3] = 3.0;
bool command_label_test = false;
auto command_label_callback = [command_label, &command_label_test](const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
DebugUtilsLabelCheckData *data) {
data->count++;
command_label_test = false;
if (pCallbackData->cmdBufLabelCount == 1) {
command_label_test = pCallbackData->pCmdBufLabels[0] == command_label;
}
};
callback_data.callback = command_label_callback;
fpvkCmdInsertDebugUtilsLabelEXT(commandBuffer, &command_label);
// Test parameter_validation layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, commandBuffer_name);
vk::CmdSetScissor(commandBuffer, 1, 1, scissors);
m_errorMonitor->VerifyFound();
// Check the label test
if (!command_label_test) {
ADD_FAILURE() << "Command label '" << command_label.pLabelName << "' not passed to callback.";
}
// Test object_tracker layer
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, commandBuffer_name);
vk::FreeCommandBuffers(device(), commandpool_2, 1, &commandBuffer);
m_errorMonitor->VerifyFound();
vk::DestroyCommandPool(device(), commandpool_1, NULL);
vk::DestroyCommandPool(device(), commandpool_2, NULL);
fpvkDestroyDebugUtilsMessengerEXT(instance(), my_messenger, nullptr);
}
TEST_F(VkLayerTest, InvalidStructSType) {
TEST_DESCRIPTION("Specify an invalid VkStructureType for a Vulkan structure's sType field");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "parameter pAllocateInfo->sType must be");
// Zero struct memory, effectively setting sType to
// VK_STRUCTURE_TYPE_APPLICATION_INFO
// Expected to trigger an error with
// parameter_validation::validate_struct_type
VkMemoryAllocateInfo alloc_info = {};
VkDeviceMemory memory = VK_NULL_HANDLE;
vk::AllocateMemory(device(), &alloc_info, NULL, &memory);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "parameter pSubmits[0].sType must be");
// Zero struct memory, effectively setting sType to
// VK_STRUCTURE_TYPE_APPLICATION_INFO
// Expected to trigger an error with
// parameter_validation::validate_struct_type_array
VkSubmitInfo submit_info = {};
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidStructPNext) {
TEST_DESCRIPTION("Specify an invalid value for a Vulkan structure's pNext field");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, "value of pCreateInfo->pNext must be NULL");
    // Set VkEventCreateInfo::pNext to a non-NULL value, when pNext must be NULL.
// Need to pick a function that has no allowed pNext structure types.
// Expected to trigger an error with parameter_validation::validate_struct_pnext
VkEvent event = VK_NULL_HANDLE;
VkEventCreateInfo event_alloc_info = {};
// Zero-initialization will provide the correct sType
VkApplicationInfo app_info = {};
event_alloc_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
event_alloc_info.pNext = &app_info;
vk::CreateEvent(device(), &event_alloc_info, NULL, &event);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, " chain includes a structure with unexpected VkStructureType ");
// Set VkMemoryAllocateInfo::pNext to a non-NULL value, but use
// a function that has allowed pNext structure types and specify
// a structure type that is not allowed.
// Expected to trigger an error with parameter_validation::validate_struct_pnext
VkDeviceMemory memory = VK_NULL_HANDLE;
VkMemoryAllocateInfo memory_alloc_info = {};
memory_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_alloc_info.pNext = &app_info;
vk::AllocateMemory(device(), &memory_alloc_info, NULL, &memory);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kWarningBit, " chain includes a structure with unexpected VkStructureType ");
    // Same concept as above, but unlike vkAllocateMemory, where VkMemoryAllocateInfo is const,
    // vkGetPhysicalDeviceProperties2 takes VkPhysicalDeviceProperties2 as a non-const pointer
VkPhysicalDeviceProperties2 physical_device_properties2 = {};
physical_device_properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
physical_device_properties2.pNext = &app_info;
vk::GetPhysicalDeviceProperties2(gpu(), &physical_device_properties2);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueOutOfRange) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
"does not fall within the begin..end range of the core VkFormat enumeration tokens");
// Specify an invalid VkFormat value
// Expected to trigger an error with
// parameter_validation::validate_ranged_enum
VkFormatProperties format_properties;
vk::GetPhysicalDeviceFormatProperties(gpu(), static_cast<VkFormat>(8000), &format_properties);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueBadMask) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "contains flag bits that are not recognized members of");
// Specify an invalid VkFlags bitmask value
// Expected to trigger an error with parameter_validation::validate_flags
VkImageFormatProperties image_format_properties;
vk::GetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
static_cast<VkImageUsageFlags>(1 << 25), 0, &image_format_properties);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueBadFlag) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "contains flag bits that are not recognized members of");
// Specify an invalid VkFlags array entry
// Expected to trigger an error with parameter_validation::validate_flags_array
VkSemaphore semaphore;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore);
// `stage_flags` is set to a value which, currently, is not a defined stage flag
// `VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM` works well for this
VkPipelineStageFlags stage_flags = VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM;
// `waitSemaphoreCount` *must* be greater than 0 to perform this check
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphore;
submit_info.pWaitDstStageMask = &stage_flags;
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueBadBool) {
// Make sure using VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE doesn't trigger a false positive.
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
} else {
printf("%s VK_KHR_sampler_mirror_clamp_to_edge extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Specify an invalid VkBool32 value, expecting a warning with parameter_validation::validate_bool32
VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
// Not VK_TRUE or VK_FALSE
sampler_info.anisotropyEnable = 3;
CreateSamplerTest(*this, &sampler_info, "is neither VK_TRUE nor VK_FALSE");
}
TEST_F(VkLayerTest, UnrecognizedValueMaxEnum) {
ASSERT_NO_FATAL_FAILURE(Init());
// Specify MAX_ENUM
VkFormatProperties format_properties;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "does not fall within the begin..end range");
vk::GetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_MAX_ENUM, &format_properties);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, SubmitSignaledFence) {
vk_testing::Fence testFence;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "submitted in SIGNALED state. Fences must be reset before being submitted");
VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.pNext = NULL;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color);
m_commandBuffer->end();
testFence.init(*m_device, fenceInfo);
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, testFence.handle());
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, LeakAnObject) {
TEST_DESCRIPTION("Create a fence and destroy its device without first destroying the fence.");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
// Workaround for overzealous layers checking even the guaranteed 0th queue family
const auto q_props = vk_testing::PhysicalDevice(gpu()).queue_properties();
ASSERT_TRUE(q_props.size() > 0);
ASSERT_TRUE(q_props[0].queueCount > 0);
const float q_priority[] = {1.0f};
VkDeviceQueueCreateInfo queue_ci = {};
queue_ci.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_ci.queueFamilyIndex = 0;
queue_ci.queueCount = 1;
queue_ci.pQueuePriorities = q_priority;
VkDeviceCreateInfo device_ci = {};
device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_ci.queueCreateInfoCount = 1;
device_ci.pQueueCreateInfos = &queue_ci;
VkDevice leaky_device;
ASSERT_VK_SUCCESS(vk::CreateDevice(gpu(), &device_ci, nullptr, &leaky_device));
const VkFenceCreateInfo fence_ci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO};
VkFence leaked_fence;
ASSERT_VK_SUCCESS(vk::CreateFence(leaky_device, &fence_ci, nullptr, &leaked_fence));
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyDevice-device-00378");
vk::DestroyDevice(leaky_device, nullptr);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UseObjectWithWrongDevice) {
TEST_DESCRIPTION(
"Try to destroy a render pass object using a device other than the one it was created on. This should generate a distinct "
"error from the invalid handle error.");
// Create first device and renderpass
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Create second device
float priorities[] = {1.0f};
VkDeviceQueueCreateInfo queue_info{};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.pNext = NULL;
queue_info.flags = 0;
queue_info.queueFamilyIndex = 0;
queue_info.queueCount = 1;
queue_info.pQueuePriorities = &priorities[0];
VkDeviceCreateInfo device_create_info = {};
auto features = m_device->phy().features();
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = NULL;
device_create_info.queueCreateInfoCount = 1;
device_create_info.pQueueCreateInfos = &queue_info;
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = NULL;
device_create_info.pEnabledFeatures = &features;
VkDevice second_device;
ASSERT_VK_SUCCESS(vk::CreateDevice(gpu(), &device_create_info, NULL, &second_device));
// Try to destroy the renderpass from the first device using the second device
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyRenderPass-renderPass-parent");
vk::DestroyRenderPass(second_device, m_renderPass, NULL);
m_errorMonitor->VerifyFound();
vk::DestroyDevice(second_device, NULL);
}
TEST_F(VkLayerTest, InvalidAllocationCallbacks) {
TEST_DESCRIPTION("Test with invalid VkAllocationCallbacks");
ASSERT_NO_FATAL_FAILURE(Init());
// vk::CreateInstance, and vk::CreateDevice tend to crash in the Loader Trampoline ATM, so choosing vk::CreateCommandPool
const VkCommandPoolCreateInfo cpci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, nullptr, 0,
DeviceObj()->QueueFamilyMatching(0, 0, true)};
VkCommandPool cmdPool;
struct Alloc {
static VKAPI_ATTR void *VKAPI_CALL alloc(void *, size_t, size_t, VkSystemAllocationScope) { return nullptr; };
static VKAPI_ATTR void *VKAPI_CALL realloc(void *, void *, size_t, size_t, VkSystemAllocationScope) { return nullptr; };
static VKAPI_ATTR void VKAPI_CALL free(void *, void *){};
static VKAPI_ATTR void VKAPI_CALL internalAlloc(void *, size_t, VkInternalAllocationType, VkSystemAllocationScope){};
static VKAPI_ATTR void VKAPI_CALL internalFree(void *, size_t, VkInternalAllocationType, VkSystemAllocationScope){};
};
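    // Each block below leaves out one required callback (or supplies only one of the paired
    // internal allocation/free callbacks) so the matching VkAllocationCallbacks VUID fires.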
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAllocationCallbacks-pfnAllocation-00632");
const VkAllocationCallbacks allocator = {nullptr, nullptr, Alloc::realloc, Alloc::free, nullptr, nullptr};
vk::CreateCommandPool(device(), &cpci, &allocator, &cmdPool);
m_errorMonitor->VerifyFound();
}
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAllocationCallbacks-pfnReallocation-00633");
const VkAllocationCallbacks allocator = {nullptr, Alloc::alloc, nullptr, Alloc::free, nullptr, nullptr};
vk::CreateCommandPool(device(), &cpci, &allocator, &cmdPool);
m_errorMonitor->VerifyFound();
}
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAllocationCallbacks-pfnFree-00634");
const VkAllocationCallbacks allocator = {nullptr, Alloc::alloc, Alloc::realloc, nullptr, nullptr, nullptr};
vk::CreateCommandPool(device(), &cpci, &allocator, &cmdPool);
m_errorMonitor->VerifyFound();
}
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAllocationCallbacks-pfnInternalAllocation-00635");
const VkAllocationCallbacks allocator = {nullptr, Alloc::alloc, Alloc::realloc, Alloc::free, nullptr, Alloc::internalFree};
vk::CreateCommandPool(device(), &cpci, &allocator, &cmdPool);
m_errorMonitor->VerifyFound();
}
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAllocationCallbacks-pfnInternalAllocation-00635");
const VkAllocationCallbacks allocator = {nullptr, Alloc::alloc, Alloc::realloc, Alloc::free, Alloc::internalAlloc, nullptr};
vk::CreateCommandPool(device(), &cpci, &allocator, &cmdPool);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, MismatchedQueueFamiliesOnSubmit) {
TEST_DESCRIPTION(
"Submit command buffer created using one queue family and attempt to submit them on a queue created in a different queue "
"family.");
ASSERT_NO_FATAL_FAILURE(Init()); // assumes it initializes all queue families on vk::CreateDevice
// This test is meaningless unless we have multiple queue families
auto queue_family_properties = m_device->phy().queue_properties();
std::vector<uint32_t> queue_families;
for (uint32_t i = 0; i < queue_family_properties.size(); ++i)
if (queue_family_properties[i].queueCount > 0) queue_families.push_back(i);
if (queue_families.size() < 2) {
printf("%s Device only has one queue family; skipped.\n", kSkipPrefix);
return;
}
const uint32_t queue_family = queue_families[0];
const uint32_t other_queue_family = queue_families[1];
VkQueue other_queue;
vk::GetDeviceQueue(m_device->device(), other_queue_family, 0, &other_queue);
VkCommandPoolObj cmd_pool(m_device, queue_family);
VkCommandBufferObj cmd_buff(m_device, &cmd_pool);
cmd_buff.begin();
cmd_buff.end();
// Submit on the wrong queue
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &cmd_buff.handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkQueueSubmit-pCommandBuffers-00074");
vk::QueueSubmit(other_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, TemporaryExternalSemaphore) {
#ifdef _WIN32
const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR;
#else
const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
// Check for external semaphore instance extensions
if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME);
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
// Check for external semaphore device extensions
if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
m_device_extension_names.push_back(extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Check for external semaphore import and export capability
VkPhysicalDeviceExternalSemaphoreInfoKHR esi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, nullptr,
handle_type};
VkExternalSemaphorePropertiesKHR esp = {VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, nullptr};
auto vkGetPhysicalDeviceExternalSemaphorePropertiesKHR =
(PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)vk::GetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR");
vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(gpu(), &esi, &esp);
if (!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) ||
!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR)) {
printf("%s External semaphore does not support importing and exporting, skipping test\n", kSkipPrefix);
return;
}
VkResult err;
// Create a semaphore to export payload from
VkExportSemaphoreCreateInfoKHR esci = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, nullptr, handle_type};
VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &esci, 0};
VkSemaphore export_semaphore;
err = vk::CreateSemaphore(m_device->device(), &sci, nullptr, &export_semaphore);
ASSERT_VK_SUCCESS(err);
// Create a semaphore to import payload into
sci.pNext = nullptr;
VkSemaphore import_semaphore;
err = vk::CreateSemaphore(m_device->device(), &sci, nullptr, &import_semaphore);
ASSERT_VK_SUCCESS(err);
#ifdef _WIN32
// Export semaphore payload to an opaque handle
HANDLE handle = nullptr;
VkSemaphoreGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_semaphore,
handle_type};
auto vkGetSemaphoreWin32HandleKHR =
(PFN_vkGetSemaphoreWin32HandleKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetSemaphoreWin32HandleKHR");
err = vkGetSemaphoreWin32HandleKHR(m_device->device(), &ghi, &handle);
ASSERT_VK_SUCCESS(err);
// Import opaque handle exported above *temporarily*
VkImportSemaphoreWin32HandleInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR,
nullptr,
import_semaphore,
VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR,
handle_type,
handle,
nullptr};
auto vkImportSemaphoreWin32HandleKHR =
(PFN_vkImportSemaphoreWin32HandleKHR)vk::GetDeviceProcAddr(m_device->device(), "vkImportSemaphoreWin32HandleKHR");
err = vkImportSemaphoreWin32HandleKHR(m_device->device(), &ihi);
ASSERT_VK_SUCCESS(err);
#else
// Export semaphore payload to an opaque handle
int fd = 0;
VkSemaphoreGetFdInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr, export_semaphore, handle_type};
auto vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetSemaphoreFdKHR");
err = vkGetSemaphoreFdKHR(m_device->device(), &ghi, &fd);
ASSERT_VK_SUCCESS(err);
// Import opaque handle exported above *temporarily*
VkImportSemaphoreFdInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, nullptr, import_semaphore,
VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd};
auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vk::GetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR");
err = vkImportSemaphoreFdKHR(m_device->device(), &ihi);
ASSERT_VK_SUCCESS(err);
#endif
// Wait on the imported semaphore twice in vk::QueueSubmit, the second wait should be an error
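    // The temporary import payload is consumed by the first wait, after which import_semaphore
    // reverts to its permanent (never signaled) state, so nothing can satisfy the second wait.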
VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
VkSubmitInfo si[] = {
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr},
};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "has no way to be signaled");
vk::QueueSubmit(m_device->m_queue, 4, si, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
auto index = m_device->graphics_queue_node_index_;
if (m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) {
// Wait on the imported semaphore twice in vk::QueueBindSparse, the second wait should be an error
VkBindSparseInfo bi[] = {
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr},
};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "has no way to be signaled");
vk::QueueBindSparse(m_device->m_queue, 4, bi, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
// Cleanup
err = vk::QueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
vk::DestroySemaphore(m_device->device(), export_semaphore, nullptr);
vk::DestroySemaphore(m_device->device(), import_semaphore, nullptr);
}
TEST_F(VkLayerTest, TemporaryExternalFence) {
#ifdef _WIN32
const auto extension_name = VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR;
#else
const auto extension_name = VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
// Check for external fence instance extensions
if (InstanceExtensionSupported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME);
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s External fence extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
// Check for external fence device extensions
if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
m_device_extension_names.push_back(extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME);
} else {
printf("%s External fence extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Check for external fence import and export capability
VkPhysicalDeviceExternalFenceInfoKHR efi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, nullptr, handle_type};
VkExternalFencePropertiesKHR efp = {VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, nullptr};
auto vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)vk::GetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceExternalFencePropertiesKHR");
vkGetPhysicalDeviceExternalFencePropertiesKHR(gpu(), &efi, &efp);
if (!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR) ||
!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR)) {
printf("%s External fence does not support importing and exporting, skipping test\n", kSkipPrefix);
return;
}
VkResult err;
// Create a fence to export payload from
VkFence export_fence;
{
VkExportFenceCreateInfoKHR efci = {VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, nullptr, handle_type};
VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &efci, 0};
err = vk::CreateFence(m_device->device(), &fci, nullptr, &export_fence);
ASSERT_VK_SUCCESS(err);
}
// Create a fence to import payload into
VkFence import_fence;
{
VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
err = vk::CreateFence(m_device->device(), &fci, nullptr, &import_fence);
ASSERT_VK_SUCCESS(err);
}
#ifdef _WIN32
// Export fence payload to an opaque handle
HANDLE handle = nullptr;
{
VkFenceGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_fence, handle_type};
auto vkGetFenceWin32HandleKHR =
(PFN_vkGetFenceWin32HandleKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetFenceWin32HandleKHR");
err = vkGetFenceWin32HandleKHR(m_device->device(), &ghi, &handle);
ASSERT_VK_SUCCESS(err);
}
// Import opaque handle exported above
{
VkImportFenceWin32HandleInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR,
nullptr,
import_fence,
VK_FENCE_IMPORT_TEMPORARY_BIT_KHR,
handle_type,
handle,
nullptr};
auto vkImportFenceWin32HandleKHR =
(PFN_vkImportFenceWin32HandleKHR)vk::GetDeviceProcAddr(m_device->device(), "vkImportFenceWin32HandleKHR");
err = vkImportFenceWin32HandleKHR(m_device->device(), &ifi);
ASSERT_VK_SUCCESS(err);
}
#else
// Export fence payload to an opaque handle
int fd = 0;
{
VkFenceGetFdInfoKHR gfi = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, export_fence, handle_type};
auto vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetFenceFdKHR");
err = vkGetFenceFdKHR(m_device->device(), &gfi, &fd);
ASSERT_VK_SUCCESS(err);
}
// Import opaque handle exported above
{
VkImportFenceFdInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, nullptr, import_fence,
VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd};
auto vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)vk::GetDeviceProcAddr(m_device->device(), "vkImportFenceFdKHR");
err = vkImportFenceFdKHR(m_device->device(), &ifi);
ASSERT_VK_SUCCESS(err);
}
#endif
// Undo the temporary import
vk::ResetFences(m_device->device(), 1, &import_fence);
// Signal the previously imported fence twice, the second signal should produce a validation error
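    // After the reset, import_fence is back on its permanent, unsignaled payload; the first
    // submit puts it in flight, and reusing it while that submission is still pending is invalid.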
vk::QueueSubmit(m_device->m_queue, 0, nullptr, import_fence);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "is already in use by another submission.");
vk::QueueSubmit(m_device->m_queue, 0, nullptr, import_fence);
m_errorMonitor->VerifyFound();
// Cleanup
err = vk::QueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
vk::DestroyFence(m_device->device(), export_fence, nullptr);
vk::DestroyFence(m_device->device(), import_fence, nullptr);
}
TEST_F(VkLayerTest, InvalidCmdBufferEventDestroyed) {
TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to an event dependency being destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
VkEvent event;
VkEventCreateInfo evci = {};
evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
VkResult result = vk::CreateEvent(m_device->device(), &evci, NULL, &event);
ASSERT_VK_SUCCESS(result);
m_commandBuffer->begin();
vk::CmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
m_commandBuffer->end();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkEvent");
// Destroy event dependency prior to submit to cause ERROR
vk::DestroyEvent(m_device->device(), event, NULL);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidCmdBufferQueryPoolDestroyed) {
TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a query pool dependency being destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
VkQueryPool query_pool;
VkQueryPoolCreateInfo qpci{};
qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
qpci.queryType = VK_QUERY_TYPE_TIMESTAMP;
qpci.queryCount = 1;
VkResult result = vk::CreateQueryPool(m_device->device(), &qpci, nullptr, &query_pool);
ASSERT_VK_SUCCESS(result);
m_commandBuffer->begin();
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
m_commandBuffer->end();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkQueryPool");
// Destroy query pool dependency prior to submit to cause ERROR
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DeviceFeature2AndVertexAttributeDivisorExtensionUnenabled) {
TEST_DESCRIPTION(
"Test unenabled VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME & "
"VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME.");
VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT vadf = {};
vadf.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT;
VkPhysicalDeviceFeatures2 pd_features2 = {};
pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
pd_features2.pNext = &vadf;
ASSERT_NO_FATAL_FAILURE(Init());
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = &pd_features2;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
VkDevice testDevice;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
"VK_KHR_get_physical_device_properties2 must be enabled when it creates an instance");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VK_EXT_vertex_attribute_divisor must be enabled when it creates a device");
m_errorMonitor->SetUnexpectedError("Failed to create device chain");
vk::CreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, Features12AndpNext) {
TEST_DESCRIPTION("Test VkPhysicalDeviceVulkan12Features and illegal struct in pNext");
SetTargetApiVersion(VK_API_VERSION_1_2);
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
printf("%s Vulkan12Struct requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
return;
}
if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME) ||
!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_8BIT_STORAGE_EXTENSION_NAME) ||
!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_16BIT_STORAGE_EXTENSION_NAME)) {
printf("%s Storage Extension(s) not supported, skipping tests\n", kSkipPrefix);
return;
}
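    // VkPhysicalDeviceVulkan11Features/VkPhysicalDeviceVulkan12Features must not be chained
    // together with the individual feature structs they subsume (16-bit/8-bit storage here),
    // so the chain built below intentionally violates VUID-VkDeviceCreateInfo-pNext-02829/02830.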
VkPhysicalDevice16BitStorageFeatures sixteen_bit = {};
sixteen_bit.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES;
sixteen_bit.storageBuffer16BitAccess = true;
VkPhysicalDeviceVulkan11Features features11 = {};
features11.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES;
features11.pNext = &sixteen_bit;
features11.storageBuffer16BitAccess = true;
VkPhysicalDevice8BitStorageFeatures eight_bit = {};
eight_bit.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES;
eight_bit.pNext = &features11;
eight_bit.storageBuffer8BitAccess = true;
VkPhysicalDeviceVulkan12Features features12 = {};
features12.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
features12.pNext = &eight_bit;
features12.storageBuffer8BitAccess = true;
vk_testing::PhysicalDevice physical_device(gpu());
vk_testing::QueueCreateInfoArray queue_info(physical_device.queue_properties());
std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
auto qci = queue_info.data();
for (uint32_t i = 0; i < queue_info.size(); ++i) {
if (qci[i].queueCount) {
create_queue_infos.push_back(qci[i]);
}
}
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = &features12;
    device_create_info.queueCreateInfoCount = create_queue_infos.size();
    device_create_info.pQueueCreateInfos = create_queue_infos.data();
VkDevice testDevice;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-pNext-02829");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-pNext-02830");
m_errorMonitor->SetUnexpectedError("Failed to create device chain");
vk::CreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, BeginQueryOnTimestampPool) {
TEST_DESCRIPTION("Call CmdBeginQuery on a TIMESTAMP query pool.");
ASSERT_NO_FATAL_FAILURE(Init());
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryType-02804");
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vk::BeginCommandBuffer(m_commandBuffer->handle(), &begin_info);
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
vk::EndCommandBuffer(m_commandBuffer->handle());
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
}
TEST_F(VkLayerTest, SwapchainAcquireImageNoSync) {
TEST_DESCRIPTION("Test vkAcquireNextImageKHR with VK_NULL_HANDLE semaphore and fence");
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_TRUE(InitSwapchain());
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkAcquireNextImageKHR-semaphore-01780");
uint32_t dummy;
vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, VK_NULL_HANDLE, VK_NULL_HANDLE, &dummy);
m_errorMonitor->VerifyFound();
}
DestroySwapchain();
}
TEST_F(VkLayerTest, SwapchainAcquireImageNoSync2KHR) {
TEST_DESCRIPTION("Test vkAcquireNextImage2KHR with VK_NULL_HANDLE semaphore and fence");
SetTargetApiVersion(VK_API_VERSION_1_1);
bool extension_dependency_satisfied = false;
if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME);
extension_dependency_satisfied = true;
} else if (m_instance_api_version < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (extension_dependency_satisfied && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME);
} else if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_TRUE(InitSwapchain());
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAcquireNextImageInfoKHR-semaphore-01782");
VkAcquireNextImageInfoKHR acquire_info = {VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR};
acquire_info.swapchain = m_swapchain;
acquire_info.timeout = UINT64_MAX;
acquire_info.semaphore = VK_NULL_HANDLE;
acquire_info.fence = VK_NULL_HANDLE;
acquire_info.deviceMask = 0x1;
uint32_t dummy;
vk::AcquireNextImage2KHR(device(), &acquire_info, &dummy);
m_errorMonitor->VerifyFound();
}
DestroySwapchain();
}
TEST_F(VkLayerTest, SwapchainAcquireImageNoBinarySemaphore) {
TEST_DESCRIPTION("Test vkAcquireNextImageKHR with non-binary semaphore");
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_TRUE(InitSwapchain());
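    // vkAcquireNextImageKHR requires a binary semaphore; the timeline semaphore created below
    // should trigger VUID-vkAcquireNextImageKHR-semaphore-03265.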
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkAcquireNextImageKHR-semaphore-03265");
uint32_t image_i;
vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, semaphore, VK_NULL_HANDLE, &image_i);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
DestroySwapchain();
}
TEST_F(VkLayerTest, SwapchainAcquireImageNoBinarySemaphore2KHR) {
TEST_DESCRIPTION("Test vkAcquireNextImage2KHR with non-binary semaphore");
TEST_DESCRIPTION("Test vkAcquireNextImage2KHR with VK_NULL_HANDLE semaphore and fence");
SetTargetApiVersion(VK_API_VERSION_1_1);
bool extension_dependency_satisfied = false;
if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME);
extension_dependency_satisfied = true;
} else if (m_instance_api_version < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (extension_dependency_satisfied && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME);
} else if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_TRUE(InitSwapchain());
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkAcquireNextImageInfoKHR acquire_info = {};
acquire_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR;
acquire_info.swapchain = m_swapchain;
acquire_info.timeout = UINT64_MAX;
acquire_info.semaphore = semaphore;
acquire_info.deviceMask = 0x1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAcquireNextImageInfoKHR-semaphore-03266");
uint32_t image_i;
vk::AcquireNextImage2KHR(device(), &acquire_info, &image_i);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
DestroySwapchain();
}
TEST_F(VkLayerTest, SwapchainAcquireTooManyImages) {
TEST_DESCRIPTION("Acquiring invalid amount of images from the swapchain.");
if (!AddSurfaceInstanceExtension()) return;
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!AddSwapchainDeviceExtension()) return;
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_TRUE(InitSwapchain());
uint32_t image_count;
ASSERT_VK_SUCCESS(vk::GetSwapchainImagesKHR(device(), m_swapchain, &image_count, nullptr));
VkSurfaceCapabilitiesKHR caps;
ASSERT_VK_SUCCESS(vk::GetPhysicalDeviceSurfaceCapabilitiesKHR(gpu(), m_surface, &caps));
const uint32_t acquirable_count = image_count - caps.minImageCount + 1;
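// Once more than (image_count - minImageCount) images are acquired but not yet presented, an acquire
// with timeout == UINT64_MAX is invalid. The loop below acquires one more than that difference, which
// is still legal at every step; the extra blocking acquire afterwards must then trigger VUID 01802.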
std::vector<VkFenceObj> fences(acquirable_count);
for (uint32_t i = 0; i < acquirable_count; ++i) {
fences[i].init(*m_device, VkFenceObj::create_info());
uint32_t image_i = i;  // WORKAROUND: MockICD does not modify the value, so we have to set it ourselves or the validator state gets corrupted
const auto res = vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, VK_NULL_HANDLE, fences[i].handle(), &image_i);
ASSERT_TRUE(res == VK_SUCCESS || res == VK_SUBOPTIMAL_KHR);
}
VkFenceObj error_fence;
error_fence.init(*m_device, VkFenceObj::create_info());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkAcquireNextImageKHR-swapchain-01802");
uint32_t image_i;
vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, VK_NULL_HANDLE, error_fence.handle(), &image_i);
m_errorMonitor->VerifyFound();
// Cleanup
vk::WaitForFences(device(), fences.size(), MakeVkHandles<VkFence>(fences).data(), VK_TRUE, UINT64_MAX);
DestroySwapchain();
}
TEST_F(VkLayerTest, SwapchainAcquireTooManyImages2KHR) {
TEST_DESCRIPTION("Acquiring invalid amount of images from the swapchain via vkAcquireNextImage2KHR.");
SetTargetApiVersion(VK_API_VERSION_1_1);
bool extension_dependency_satisfied = false;
if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME);
extension_dependency_satisfied = true;
} else if (m_instance_api_version < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSurfaceInstanceExtension()) return;
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (extension_dependency_satisfied && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME);
} else if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s vkAcquireNextImage2KHR not supported, skipping test\n", kSkipPrefix);
return;
}
if (!AddSwapchainDeviceExtension()) return;
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_TRUE(InitSwapchain());
uint32_t image_count;
ASSERT_VK_SUCCESS(vk::GetSwapchainImagesKHR(device(), m_swapchain, &image_count, nullptr));
VkSurfaceCapabilitiesKHR caps;
ASSERT_VK_SUCCESS(vk::GetPhysicalDeviceSurfaceCapabilitiesKHR(gpu(), m_surface, &caps));
const uint32_t acquirable_count = image_count - caps.minImageCount + 1;
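// Same over-acquisition setup as SwapchainAcquireTooManyImages, exercised through vkAcquireNextImage2KHR.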
std::vector<VkFenceObj> fences(acquirable_count);
for (uint32_t i = 0; i < acquirable_count; ++i) {
fences[i].init(*m_device, VkFenceObj::create_info());
uint32_t image_i = i;  // WORKAROUND: MockICD does not modify the value, so we have to set it ourselves or the validator state gets corrupted
const auto res = vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, VK_NULL_HANDLE, fences[i].handle(), &image_i);
ASSERT_TRUE(res == VK_SUCCESS || res == VK_SUBOPTIMAL_KHR);
}
VkFenceObj error_fence;
error_fence.init(*m_device, VkFenceObj::create_info());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkAcquireNextImage2KHR-swapchain-01803");
VkAcquireNextImageInfoKHR acquire_info = {VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR};
acquire_info.swapchain = m_swapchain;
acquire_info.timeout = UINT64_MAX;
acquire_info.fence = error_fence.handle();
acquire_info.deviceMask = 0x1;
uint32_t image_i;
vk::AcquireNextImage2KHR(device(), &acquire_info, &image_i);
m_errorMonitor->VerifyFound();
// Cleanup
vk::WaitForFences(device(), fences.size(), MakeVkHandles<VkFence>(fences).data(), VK_TRUE, UINT64_MAX);
DestroySwapchain();
}
TEST_F(VkLayerTest, InvalidDeviceMask) {
TEST_DESCRIPTION("Invalid deviceMask.");
SetTargetApiVersion(VK_API_VERSION_1_1);
bool support_surface = true;
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping VkAcquireNextImageInfoKHR test\n", kSkipPrefix);
support_surface = false;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (support_surface) {
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping BindSwapchainImageMemory test\n", kSkipPrefix);
support_surface = false;
}
}
if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
printf("%s Device Groups requires Vulkan 1.1+, skipping test\n", kSkipPrefix);
return;
}
uint32_t physical_device_group_count = 0;
vk::EnumeratePhysicalDeviceGroups(instance(), &physical_device_group_count, nullptr);
if (physical_device_group_count == 0) {
printf("%s physical_device_group_count is 0, skipping test\n", kSkipPrefix);
return;
}
std::vector<VkPhysicalDeviceGroupProperties> physical_device_group(physical_device_group_count,
{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES});
vk::EnumeratePhysicalDeviceGroups(instance(), &physical_device_group_count, physical_device_group.data());
VkDeviceGroupDeviceCreateInfo create_device_pnext = {};
create_device_pnext.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO;
create_device_pnext.physicalDeviceCount = physical_device_group[0].physicalDeviceCount;
create_device_pnext.pPhysicalDevices = physical_device_group[0].physicalDevices;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &create_device_pnext, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!InitSwapchain()) {
printf("%s Cannot create surface or swapchain, skipping VkAcquireNextImageInfoKHR test\n", kSkipPrefix);
support_surface = false;
}
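// The sub-tests below pass deviceMask values that are either all-ones (bits set for physical devices
// that are not part of the device group) or zero; both are invalid device masks for these structures
// and commands.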
// Test VkMemoryAllocateFlagsInfo
VkMemoryAllocateFlagsInfo alloc_flags_info = {};
alloc_flags_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
alloc_flags_info.flags = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT;
alloc_flags_info.deviceMask = 0xFFFFFFFF;
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = &alloc_flags_info;
alloc_info.memoryTypeIndex = 0;
alloc_info.allocationSize = 32;
VkDeviceMemory mem;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675");
vk::AllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
m_errorMonitor->VerifyFound();
alloc_flags_info.deviceMask = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676");
vk::AllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
m_errorMonitor->VerifyFound();
// Test VkDeviceGroupCommandBufferBeginInfo
VkDeviceGroupCommandBufferBeginInfo dev_grp_cmd_buf_info = {};
dev_grp_cmd_buf_info.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO;
dev_grp_cmd_buf_info.deviceMask = 0xFFFFFFFF;
VkCommandBufferBeginInfo cmd_buf_info = {};
cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmd_buf_info.pNext = &dev_grp_cmd_buf_info;
m_commandBuffer->reset();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106");
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_buf_info);
m_errorMonitor->VerifyFound();
dev_grp_cmd_buf_info.deviceMask = 0;
m_commandBuffer->reset();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107");
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_buf_info);
m_errorMonitor->VerifyFound();
// Test VkDeviceGroupRenderPassBeginInfo
dev_grp_cmd_buf_info.deviceMask = 0x00000001;
m_commandBuffer->reset();
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_buf_info);
VkDeviceGroupRenderPassBeginInfo dev_grp_rp_info = {};
dev_grp_rp_info.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO;
dev_grp_rp_info.deviceMask = 0xFFFFFFFF;
m_renderPassBeginInfo.pNext = &dev_grp_rp_info;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");
vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyFound();
dev_grp_rp_info.deviceMask = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyFound();
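// deviceRenderAreaCount must be either zero or equal to the number of physical devices in the logical
// device; use one more than the group size to trigger VUID 00908.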
dev_grp_rp_info.deviceMask = 0x00000001;
dev_grp_rp_info.deviceRenderAreaCount = physical_device_group[0].physicalDeviceCount + 1;
std::vector<VkRect2D> device_render_areas(dev_grp_rp_info.deviceRenderAreaCount, m_renderPassBeginInfo.renderArea);
dev_grp_rp_info.pDeviceRenderAreas = device_render_areas.data();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908");
vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyFound();
// Test vk::CmdSetDeviceMask()
vk::CmdSetDeviceMask(m_commandBuffer->handle(), 0x00000001);
dev_grp_rp_info.deviceRenderAreaCount = physical_device_group[0].physicalDeviceCount;
vk::CmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDeviceMask-deviceMask-00108");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDeviceMask-deviceMask-00110");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDeviceMask-deviceMask-00111");
vk::CmdSetDeviceMask(m_commandBuffer->handle(), 0xFFFFFFFF);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDeviceMask-deviceMask-00109");
vk::CmdSetDeviceMask(m_commandBuffer->handle(), 0);
m_errorMonitor->VerifyFound();
VkSemaphoreCreateInfo semaphore_create_info = {};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkSemaphore semaphore2;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore2));
VkFenceCreateInfo fence_create_info = {};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
VkFence fence;
ASSERT_VK_SUCCESS(vk::CreateFence(m_device->device(), &fence_create_info, nullptr, &fence));
if (support_surface) {
// Test VkAcquireNextImageInfoKHR
uint32_t imageIndex = 0;
VkAcquireNextImageInfoKHR acquire_next_image_info = {};
acquire_next_image_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR;
acquire_next_image_info.semaphore = semaphore;
acquire_next_image_info.swapchain = m_swapchain;
acquire_next_image_info.fence = fence;
acquire_next_image_info.deviceMask = 0xFFFFFFFF;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAcquireNextImageInfoKHR-deviceMask-01290");
vk::AcquireNextImage2KHR(m_device->device(), &acquire_next_image_info, &imageIndex);
m_errorMonitor->VerifyFound();
vk::WaitForFences(m_device->device(), 1, &fence, VK_TRUE, std::numeric_limits<int>::max());
vk::ResetFences(m_device->device(), 1, &fence);
acquire_next_image_info.semaphore = semaphore2;
acquire_next_image_info.deviceMask = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAcquireNextImageInfoKHR-deviceMask-01291");
vk::AcquireNextImage2KHR(m_device->device(), &acquire_next_image_info, &imageIndex);
m_errorMonitor->VerifyFound();
DestroySwapchain();
}
// Test VkDeviceGroupSubmitInfo
VkDeviceGroupSubmitInfo device_group_submit_info = {};
device_group_submit_info.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO;
device_group_submit_info.commandBufferCount = 1;
std::array<uint32_t, 1> command_buffer_device_masks = {0xFFFFFFFF};
device_group_submit_info.pCommandBufferDeviceMasks = command_buffer_device_masks.data();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &device_group_submit_info;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
m_commandBuffer->reset();
vk::BeginCommandBuffer(m_commandBuffer->handle(), &cmd_buf_info);
vk::EndCommandBuffer(m_commandBuffer->handle());
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::WaitForFences(m_device->device(), 1, &fence, VK_TRUE, std::numeric_limits<int>::max());
vk::DestroyFence(m_device->device(), fence, nullptr);
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
vk::DestroySemaphore(m_device->device(), semaphore2, nullptr);
}
TEST_F(VkLayerTest, ValidationCacheTestBadMerge) {
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), kValidationLayerName, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_VALIDATION_CACHE_EXTENSION_NAME);
} else {
printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Load extension functions
auto fpCreateValidationCache =
(PFN_vkCreateValidationCacheEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCreateValidationCacheEXT");
auto fpDestroyValidationCache =
(PFN_vkDestroyValidationCacheEXT)vk::GetDeviceProcAddr(m_device->device(), "vkDestroyValidationCacheEXT");
auto fpMergeValidationCaches =
(PFN_vkMergeValidationCachesEXT)vk::GetDeviceProcAddr(m_device->device(), "vkMergeValidationCachesEXT");
if (!fpCreateValidationCache || !fpDestroyValidationCache || !fpMergeValidationCaches) {
printf("%s Failed to load function pointers for %s\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME);
return;
}
VkValidationCacheCreateInfoEXT validationCacheCreateInfo;
validationCacheCreateInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
validationCacheCreateInfo.pNext = NULL;
validationCacheCreateInfo.initialDataSize = 0;
validationCacheCreateInfo.pInitialData = NULL;
validationCacheCreateInfo.flags = 0;
VkValidationCacheEXT validationCache = VK_NULL_HANDLE;
VkResult res = fpCreateValidationCache(m_device->device(), &validationCacheCreateInfo, nullptr, &validationCache);
ASSERT_VK_SUCCESS(res);
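// Merging a validation cache into itself is invalid: dstCache must not appear in pSrcCaches
// (VUID-vkMergeValidationCachesEXT-dstCache-01536).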
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkMergeValidationCachesEXT-dstCache-01536");
res = fpMergeValidationCaches(m_device->device(), validationCache, 1, &validationCache);
m_errorMonitor->VerifyFound();
fpDestroyValidationCache(m_device->device(), validationCache, nullptr);
}
TEST_F(VkLayerTest, InvalidQueueFamilyIndex) {
// Miscellaneous queueFamilyIndex validation tests
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkBufferCreateInfo buffCI = {};
buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffCI.size = 1024;
buffCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buffCI.queueFamilyIndexCount = 2;
// Introduce failure by specifying invalid queue_family_index
uint32_t qfi[2];
qfi[0] = 777;
qfi[1] = 0;
buffCI.pQueueFamilyIndices = qfi;
buffCI.sharingMode = VK_SHARING_MODE_CONCURRENT; // qfi only matters in CONCURRENT mode
// Test for queue family index out of range
CreateBufferTest(*this, &buffCI, "VUID-VkBufferCreateInfo-sharingMode-01419");
// Test for non-unique QFI in array
qfi[0] = 0;
CreateBufferTest(*this, &buffCI, "VUID-VkBufferCreateInfo-sharingMode-01419");
if (m_device->queue_props.size() > 2) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "which was not created allowing concurrent");
// Create buffer shared to queue families 1 and 2, but submitted on queue family 0
buffCI.queueFamilyIndexCount = 2;
qfi[0] = 1;
qfi[1] = 2;
VkBufferObj ib;
ib.init(*m_device, buffCI);
m_commandBuffer->begin();
vk::CmdFillBuffer(m_commandBuffer->handle(), ib.handle(), 0, 16, 5);
m_commandBuffer->end();
m_commandBuffer->QueueCommandBuffer(false);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, InvalidQueryPoolCreate) {
TEST_DESCRIPTION("Attempt to create a query pool for PIPELINE_STATISTICS without enabling pipeline stats for the device.");
ASSERT_NO_FATAL_FAILURE(Init());
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
VkDevice local_device;
VkDeviceCreateInfo device_create_info = {};
auto features = m_device->phy().features();
// Intentionally disable pipeline stats
features.pipelineStatisticsQuery = VK_FALSE;
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = NULL;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = NULL;
device_create_info.pEnabledFeatures = &features;
VkResult err = vk::CreateDevice(gpu(), &device_create_info, nullptr, &local_device);
ASSERT_VK_SUCCESS(err);
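// local_device was created with pipelineStatisticsQuery disabled, so creating a
// VK_QUERY_TYPE_PIPELINE_STATISTICS pool on it must fail with VUID 00791.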
VkQueryPoolCreateInfo qpci{};
qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;
qpci.queryCount = 1;
VkQueryPool query_pool;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkQueryPoolCreateInfo-queryType-00791");
vk::CreateQueryPool(local_device, &qpci, nullptr, &query_pool);
m_errorMonitor->VerifyFound();
vk::DestroyDevice(local_device, nullptr);
}
TEST_F(VkLayerTest, UnclosedQuery) {
TEST_DESCRIPTION("End a command buffer with a query still in progress.");
const char *invalid_query = "VUID-vkEndCommandBuffer-commandBuffer-00061";
ASSERT_NO_FATAL_FAILURE(Init());
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue);
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, invalid_query);
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info = {};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0 /*startQuery*/, 1 /*queryCount*/);
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
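// Intentionally skip vkCmdEndQuery: ending the command buffer while the query is still active
// must trigger VUID-vkEndCommandBuffer-commandBuffer-00061.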
vk::EndCommandBuffer(m_commandBuffer->handle());
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, QueryPreciseBit) {
TEST_DESCRIPTION("Check for correct Query Precise Bit circumstances.");
ASSERT_NO_FATAL_FAILURE(Init());
// These tests require that the device support pipeline statistics query
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
if (VK_TRUE != device_features.pipelineStatisticsQuery) {
printf("%s Test requires unsupported pipelineStatisticsQuery feature. Skipped.\n", kSkipPrefix);
return;
}
std::vector<const char *> device_extension_names;
auto features = m_device->phy().features();
// Test for precise bit when query type is not OCCLUSION
if (features.occlusionQueryPrecise) {
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->handle(), &event_create_info, nullptr, &event);
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryType-00800");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info = {};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->handle(), &query_pool_create_info, nullptr, &query_pool);
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, VK_QUERY_CONTROL_PRECISE_BIT);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
vk::DestroyQueryPool(m_device->handle(), query_pool, nullptr);
vk::DestroyEvent(m_device->handle(), event, nullptr);
}
// Test for precise bit when precise feature is not available
features.occlusionQueryPrecise = false;
VkDeviceObj test_device(0, gpu(), device_extension_names, &features);
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = test_device.graphics_queue_node_index_;
VkCommandPool command_pool;
vk::CreateCommandPool(test_device.handle(), &pool_create_info, nullptr, &command_pool);
VkCommandBufferAllocateInfo cmd = {};
cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd.pNext = NULL;
cmd.commandPool = command_pool;
cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd.commandBufferCount = 1;
VkCommandBuffer cmd_buffer;
VkResult err = vk::AllocateCommandBuffers(test_device.handle(), &cmd, &cmd_buffer);
ASSERT_VK_SUCCESS(err);
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(test_device.handle(), &event_create_info, nullptr, &event);
VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr};
vk::BeginCommandBuffer(cmd_buffer, &begin_info);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryType-00800");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info = {};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(test_device.handle(), &query_pool_create_info, nullptr, &query_pool);
vk::CmdResetQueryPool(cmd_buffer, query_pool, 0, 1);
vk::CmdBeginQuery(cmd_buffer, query_pool, 0, VK_QUERY_CONTROL_PRECISE_BIT);
m_errorMonitor->VerifyFound();
vk::EndCommandBuffer(cmd_buffer);
vk::DestroyQueryPool(test_device.handle(), query_pool, nullptr);
vk::DestroyEvent(test_device.handle(), event, nullptr);
vk::DestroyCommandPool(test_device.handle(), command_pool, nullptr);
}
TEST_F(VkLayerTest, StageMaskGsTsEnabled) {
TEST_DESCRIPTION(
"Attempt to use a stageMask w/ geometry shader and tesselation shader bits enabled when those features are disabled on the "
"device.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
std::vector<const char *> device_extension_names;
auto features = m_device->phy().features();
// Make sure gs & ts are disabled
features.geometryShader = false;
features.tessellationShader = false;
// The sacrificial device object
VkDeviceObj test_device(0, gpu(), device_extension_names, &features);
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = test_device.graphics_queue_node_index_;
VkCommandPool command_pool;
vk::CreateCommandPool(test_device.handle(), &pool_create_info, nullptr, &command_pool);
VkCommandBufferAllocateInfo cmd = {};
cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd.pNext = NULL;
cmd.commandPool = command_pool;
cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd.commandBufferCount = 1;
VkCommandBuffer cmd_buffer;
VkResult err = vk::AllocateCommandBuffers(test_device.handle(), &cmd, &cmd_buffer);
ASSERT_VK_SUCCESS(err);
VkEvent event;
VkEventCreateInfo evci = {};
evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
VkResult result = vk::CreateEvent(test_device.handle(), &evci, NULL, &event);
ASSERT_VK_SUCCESS(result);
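// With geometryShader and tessellationShader disabled on test_device, stage masks containing the
// geometry or tessellation stage bits are invalid for vkCmdSetEvent.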
VkCommandBufferBeginInfo cbbi = {};
cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vk::BeginCommandBuffer(cmd_buffer, &cbbi);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetEvent-stageMask-01150");
vk::CmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetEvent-stageMask-01151");
vk::CmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT);
m_errorMonitor->VerifyFound();
vk::DestroyEvent(test_device.handle(), event, NULL);
vk::DestroyCommandPool(test_device.handle(), command_pool, NULL);
}
TEST_F(VkLayerTest, DescriptorPoolInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete a DescriptorPool with a DescriptorSet that is in use.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Create image to update the descriptor with
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);
// Create Sampler
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkResult err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
// Create PSO to be used for draw-time errors below
VkShaderObj fs(m_device, bindStateFragSamplerShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
pipe.dsl_bindings_ = {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
};
const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.InitState();
pipe.CreateGraphicsPipeline();
// Update descriptor with image and sampler
pipe.descriptor_set_->WriteDescriptorImageInfo(0, view, sampler, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
pipe.descriptor_set_->UpdateDescriptorSets();
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, NULL);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer to put pool in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
// Destroy pool while in-flight, causing error
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyDescriptorPool-descriptorPool-00303");
vk::DestroyDescriptorPool(m_device->device(), pipe.descriptor_set_->pool_, NULL);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
// Cleanup
vk::DestroySampler(m_device->device(), sampler, NULL);
m_errorMonitor->SetUnexpectedError(
"If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle");
m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj");
// TODO: It seems the validation layers think ds_pool was already destroyed, even though it wasn't?
}
TEST_F(VkLayerTest, FramebufferInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use framebuffer.");
ASSERT_NO_FATAL_FAILURE(Init());
VkFormatProperties format_properties;
VkResult err = VK_SUCCESS;
vk::GetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties);
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageObj image(m_device);
image.Init(256, 256, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1};
VkFramebuffer fb;
err = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
// Just use default renderpass with our framebuffer
m_renderPassBeginInfo.framebuffer = fb;
// Create Null cmd buffer for submit
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer to put it in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
// Destroy framebuffer while in-flight
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyFramebuffer-framebuffer-00892");
vk::DestroyFramebuffer(m_device->device(), fb, NULL);
m_errorMonitor->VerifyFound();
// Wait for queue to complete so we can safely destroy everything
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If framebuffer is not VK_NULL_HANDLE, framebuffer must be a valid VkFramebuffer handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Framebuffer obj");
vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
}
TEST_F(VkLayerTest, FramebufferImageInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use image that's child of framebuffer.");
ASSERT_NO_FATAL_FAILURE(Init());
VkFormatProperties format_properties;
VkResult err = VK_SUCCESS;
vk::GetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties);
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageCreateInfo image_ci = {};
image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_ci.pNext = NULL;
image_ci.imageType = VK_IMAGE_TYPE_2D;
image_ci.format = VK_FORMAT_B8G8R8A8_UNORM;
image_ci.extent.width = 256;
image_ci.extent.height = 256;
image_ci.extent.depth = 1;
image_ci.mipLevels = 1;
image_ci.arrayLayers = 1;
image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_ci.flags = 0;
VkImageObj image(m_device);
image.init(&image_ci);
VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1};
VkFramebuffer fb;
err = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
// Just use default renderpass with our framebuffer
m_renderPassBeginInfo.framebuffer = fb;
// Create Null cmd buffer for submit
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer to put it (and attached imageView) in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer to put framebuffer and children in-flight
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
// Destroy image attached to framebuffer while in-flight
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyImage-image-01000");
vk::DestroyImage(m_device->device(), image.handle(), NULL);
m_errorMonitor->VerifyFound();
// Wait for queue to complete so we can safely destroy image and other objects
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If image is not VK_NULL_HANDLE, image must be a valid VkImage handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Image obj");
vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
}
TEST_F(VkLayerTest, EventInUseDestroyedSignaled) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
vk::CmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
m_commandBuffer->end();
vk::DestroyEvent(m_device->device(), event, nullptr);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "that is invalid because bound");
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InUseDestroyedSignaled) {
TEST_DESCRIPTION(
"Use vkCmdExecuteCommands with invalid state in primary and secondary command buffers. Delete objects that are in use. "
"Call VkQueueSubmit with an event that has been deleted.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->ExpectSuccess();
VkSemaphoreCreateInfo semaphore_create_info = {};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkFenceCreateInfo fence_create_info = {};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
VkFence fence;
ASSERT_VK_SUCCESS(vk::CreateFence(m_device->device(), &fence_create_info, nullptr, &fence));
VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.InitState();
pipe.CreateGraphicsPipeline();
pipe.descriptor_set_->WriteDescriptorBufferInfo(0, buffer_test.GetBuffer(), 1024, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
pipe.descriptor_set_->UpdateDescriptorSets();
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
m_commandBuffer->begin();
vk::CmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, NULL);
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, fence);
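// The submitted batch signals the semaphore and fence and references the event, so destroying any of
// them while the submission is still in flight must be flagged by the validation layers.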
m_errorMonitor->Reset(); // resume logmsg processing
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyEvent-event-01145");
vk::DestroyEvent(m_device->device(), event, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroySemaphore-semaphore-01137");
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyFence-fence-01120");
vk::DestroyFence(m_device->device(), fence, nullptr);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If semaphore is not VK_NULL_HANDLE, semaphore must be a valid VkSemaphore handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Semaphore obj");
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
m_errorMonitor->SetUnexpectedError("If fence is not VK_NULL_HANDLE, fence must be a valid VkFence handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Fence obj");
vk::DestroyFence(m_device->device(), fence, nullptr);
m_errorMonitor->SetUnexpectedError("If event is not VK_NULL_HANDLE, event must be a valid VkEvent handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Event obj");
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, EventStageMaskOneCommandBufferPass) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferObj commandBuffer1(m_device, m_commandPool);
VkCommandBufferObj commandBuffer2(m_device, m_commandPool);
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
commandBuffer1.begin();
vk::CmdSetEvent(commandBuffer1.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
vk::CmdWaitEvents(commandBuffer1.handle(), 1, &event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
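// The wait's srcStageMask matches the stageMask used in vkCmdSetEvent, so this submission is the
// positive case and should produce no validation errors.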
commandBuffer1.end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &commandBuffer1.handle();
m_errorMonitor->ExpectSuccess();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyNotFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, EventStageMaskOneCommandBufferFail) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferObj commandBuffer1(m_device, m_commandPool);
VkCommandBufferObj commandBuffer2(m_device, m_commandPool);
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
commandBuffer1.begin();
vk::CmdSetEvent(commandBuffer1.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
// wrong srcStageMask
vk::CmdWaitEvents(commandBuffer1.handle(), 1, &event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
0, nullptr, 0, nullptr, 0, nullptr);
commandBuffer1.end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &commandBuffer1.handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdWaitEvents-srcStageMask-parameter");
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, EventStageMaskTwoCommandBufferPass) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferObj commandBuffer1(m_device, m_commandPool);
VkCommandBufferObj commandBuffer2(m_device, m_commandPool);
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
commandBuffer1.begin();
vk::CmdSetEvent(commandBuffer1.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
commandBuffer1.end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &commandBuffer1.handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
commandBuffer2.begin();
vk::CmdWaitEvents(commandBuffer2.handle(), 1, &event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
commandBuffer2.end();
submit_info.pCommandBuffers = &commandBuffer2.handle();
m_errorMonitor->ExpectSuccess();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyNotFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, EventStageMaskTwoCommandBufferFail) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferObj commandBuffer1(m_device, m_commandPool);
VkCommandBufferObj commandBuffer2(m_device, m_commandPool);
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
commandBuffer1.begin();
vk::CmdSetEvent(commandBuffer1.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
commandBuffer1.end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &commandBuffer1.handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
commandBuffer2.begin();
// wrong srcStageMask
vk::CmdWaitEvents(commandBuffer2.handle(), 1, &event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
0, nullptr, 0, nullptr, 0, nullptr);
commandBuffer2.end();
submit_info.pCommandBuffers = &commandBuffer2.handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdWaitEvents-srcStageMask-parameter");
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::DestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, QueryPoolPartialTimestamp) {
TEST_DESCRIPTION("Request partial result on timestamp query.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_ci.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
// Use setup as a positive test...
m_errorMonitor->ExpectSuccess();
m_commandBuffer->begin();
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdWriteTimestamp(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, query_pool, 0);
m_commandBuffer->end();
// Submit cmd buffer and wait for it.
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyNotFound();
// Attempt to obtain partial results.
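// Timestamp query pools do not support VK_QUERY_RESULT_PARTIAL_BIT
// (VUID-vkGetQueryPoolResults-queryType-00818).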
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-00818");
uint32_t data_space[16];
m_errorMonitor->SetUnexpectedError("Cannot get query results on queryPool");
vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space), &data_space, sizeof(uint32_t),
VK_QUERY_RESULT_PARTIAL_BIT);
m_errorMonitor->VerifyFound();
// Destroy query pool.
vk::DestroyQueryPool(m_device->handle(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueryPoolInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use query pool.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_ci.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_commandBuffer->begin();
// Use query pool to create binding with cmd buffer
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdWriteTimestamp(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, query_pool, 0);
m_commandBuffer->end();
// Submit cmd buffer and then destroy query pool while in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyQueryPool-queryPool-00793");
vk::DestroyQueryPool(m_device->handle(), query_pool, NULL);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
// Now that cmd buffer done we can safely destroy query_pool
m_errorMonitor->SetUnexpectedError("If queryPool is not VK_NULL_HANDLE, queryPool must be a valid VkQueryPool handle");
m_errorMonitor->SetUnexpectedError("Unable to remove QueryPool obj");
vk::DestroyQueryPool(m_device->handle(), query_pool, NULL);
}
TEST_F(VkLayerTest, PipelineInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use pipeline.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const VkPipelineLayoutObj pipeline_layout(m_device);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyPipeline-pipeline-00765");
// Create PSO to be used for draw-time errors below
// Store pipeline handle so we can actually delete it before test finishes
VkPipeline delete_this_pipeline;
{ // Scope pipeline so it will be auto-deleted
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.InitState();
pipe.CreateGraphicsPipeline();
delete_this_pipeline = pipe.pipeline_;
m_commandBuffer->begin();
// Bind pipeline to cmd buffer
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then pipeline destroyed while in-flight
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
} // Pipeline deletion triggered here
m_errorMonitor->VerifyFound();
// Make sure queue finished and then actually delete pipeline
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If pipeline is not VK_NULL_HANDLE, pipeline must be a valid VkPipeline handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Pipeline obj");
vk::DestroyPipeline(m_device->handle(), delete_this_pipeline, nullptr);
}
TEST_F(VkLayerTest, ImageViewInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use imageView.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkResult err;
err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
// Create PSO to use the sampler
VkShaderObj fs(m_device, bindStateFragSamplerShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
pipe.dsl_bindings_ = {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
};
const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.InitState();
pipe.CreateGraphicsPipeline();
pipe.descriptor_set_->WriteDescriptorImageInfo(0, view, sampler, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
pipe.descriptor_set_->UpdateDescriptorSets();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyImageView-imageView-01026");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Bind pipeline to cmd buffer
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, nullptr);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer then destroy sampler
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then destroy imageView while in-flight
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::DestroyImageView(m_device->device(), view, nullptr);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
// Now we can actually destroy imageView
m_errorMonitor->SetUnexpectedError("If imageView is not VK_NULL_HANDLE, imageView must be a valid VkImageView handle");
m_errorMonitor->SetUnexpectedError("Unable to remove ImageView obj");
vk::DestroySampler(m_device->device(), sampler, nullptr);
}
TEST_F(VkLayerTest, BufferViewInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use bufferView.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
VkBufferObj buffer;
buffer.init(*m_device, buffer_create_info);
VkBufferView view;
VkBufferViewCreateInfo bvci = {};
bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
bvci.buffer = buffer.handle();
bvci.format = VK_FORMAT_R32_SFLOAT;
bvci.range = VK_WHOLE_SIZE;
VkResult err = vk::CreateBufferView(m_device->device(), &bvci, NULL, &view);
ASSERT_VK_SUCCESS(err);
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0, r32f) uniform readonly imageBuffer s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = imageLoad(s, 0);\n"
"}\n";
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
pipe.dsl_bindings_ = {
{0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
};
const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.InitState();
pipe.CreateGraphicsPipeline();
pipe.descriptor_set_->WriteDescriptorBufferView(0, view, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
pipe.descriptor_set_->UpdateDescriptorSets();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroyBufferView-bufferView-00936");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
VkRect2D scissor = {{0, 0}, {16, 16}};
vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
// Bind pipeline to cmd buffer
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, nullptr);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then destroy bufferView while in-flight
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::DestroyBufferView(m_device->device(), view, nullptr);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
// Now we can actually destroy bufferView
m_errorMonitor->SetUnexpectedError("If bufferView is not VK_NULL_HANDLE, bufferView must be a valid VkBufferView handle");
m_errorMonitor->SetUnexpectedError("Unable to remove BufferView obj");
vk::DestroyBufferView(m_device->device(), view, NULL);
}
TEST_F(VkLayerTest, SamplerInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use sampler.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkResult err;
err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
// Create PSO to use the sampler
VkShaderObj fs(m_device, bindStateFragSamplerShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
pipe.dsl_bindings_ = {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
};
const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
pipe.dyn_state_ci_ = dyn_state_ci;
pipe.InitState();
pipe.CreateGraphicsPipeline();
pipe.descriptor_set_->WriteDescriptorImageInfo(0, view, sampler, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
pipe.descriptor_set_->UpdateDescriptorSets();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkDestroySampler-sampler-01082");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Bind pipeline to cmd buffer
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, nullptr);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer then destroy sampler
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then destroy sampler while in-flight
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::DestroySampler(m_device->device(), sampler, nullptr); // Destroyed too soon
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(m_device->m_queue);
// Now we can actually destroy sampler
m_errorMonitor->SetUnexpectedError("If sampler is not VK_NULL_HANDLE, sampler must be a valid VkSampler handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Sampler obj");
vk::DestroySampler(m_device->device(), sampler, NULL); // Destroyed for real
}
TEST_F(VkLayerTest, QueueForwardProgressFenceWait) {
TEST_DESCRIPTION("Call VkQueueSubmit with a semaphore that is already signaled but not waited on by the queue.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const char *queue_forward_progress_message = "UNASSIGNED-CoreValidation-DrawState-QueueForwardProgress";
VkCommandBufferObj cb1(m_device, m_commandPool);
cb1.begin();
cb1.end();
VkSemaphoreCreateInfo semaphore_create_info = {};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &cb1.handle();
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_commandBuffer->begin();
m_commandBuffer->end();
submit_info.pCommandBuffers = &m_commandBuffer->handle();
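// The second submit still signals 'semaphore', which the first submit already signaled and which
// nothing has waited on, so the layer should report a queue forward-progress violation.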
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, queue_forward_progress_message);
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::DeviceWaitIdle(m_device->device());
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
}
#if GTEST_IS_THREADSAFE
TEST_F(VkLayerTest, ThreadCommandBufferCollision) {
test_platform_thread thread;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "THREADING ERROR");
m_errorMonitor->SetAllowedFailureMsg("THREADING ERROR"); // Ignore any extra threading errors found beyond the first one
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Calls AllocateCommandBuffers
VkCommandBufferObj commandBuffer(m_device, m_commandPool);
commandBuffer.begin();
VkEventCreateInfo event_info;
VkEvent event;
VkResult err;
memset(&event_info, 0, sizeof(event_info));
event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
err = vk::CreateEvent(device(), &event_info, NULL, &event);
ASSERT_VK_SUCCESS(err);
err = vk::ResetEvent(device(), event);
ASSERT_VK_SUCCESS(err);
struct thread_data_struct data;
data.commandBuffer = commandBuffer.handle();
data.event = event;
bool bailout = false;
data.bailout = &bailout;
m_errorMonitor->SetBailout(data.bailout);
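// The bailout flag lets the helper thread stop early once the first threading error has been
// reported, so the test does not keep racing on an already-flagged command buffer.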
// First do some correct operations using multiple threads.
// Add many entries to command buffer from another thread.
test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data);
// Make non-conflicting calls from this thread at the same time.
for (int i = 0; i < 80000; i++) {
uint32_t count;
vk::EnumeratePhysicalDevices(instance(), &count, NULL);
}
test_platform_thread_join(thread, NULL);
// Then do some incorrect operations using multiple threads.
// Add many entries to command buffer from another thread.
test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data);
// Add many entries to command buffer from this thread at the same time.
AddToCommandBuffer(&data);
test_platform_thread_join(thread, NULL);
commandBuffer.end();
m_errorMonitor->SetBailout(NULL);
m_errorMonitor->VerifyFound();
vk::DestroyEvent(device(), event, NULL);
}
TEST_F(VkLayerTest, ThreadUpdateDescriptorCollision) {
TEST_DESCRIPTION("Two threads updating the same descriptor set, expected to generate a threading error");
test_platform_thread thread;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "THREADING ERROR : vkUpdateDescriptorSets");
m_errorMonitor->SetAllowedFailureMsg("THREADING ERROR"); // Ignore any extra threading errors found beyond the first one
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet normal_descriptor_set(m_device,
{
{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
{1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
},
0);
VkBufferObj buffer;
buffer.init(*m_device, 256, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
struct thread_data_struct data;
data.device = device();
data.descriptorSet = normal_descriptor_set.set_;
data.binding = 0;
data.buffer = buffer.handle();
bool bailout = false;
data.bailout = &bailout;
m_errorMonitor->SetBailout(data.bailout);
// Update descriptors from another thread.
test_platform_thread_create(&thread, UpdateDescriptor, (void *)&data);
// Update descriptors from this thread at the same time.
struct thread_data_struct data2;
data2.device = device();
data2.descriptorSet = normal_descriptor_set.set_;
data2.binding = 1;
data2.buffer = buffer.handle();
data2.bailout = &bailout;
UpdateDescriptor(&data2);
test_platform_thread_join(thread, NULL);
m_errorMonitor->SetBailout(NULL);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ThreadUpdateDescriptorUpdateAfterBindNoCollision) {
TEST_DESCRIPTION("Two threads updating the same UAB descriptor set, expected not to generate a threading error");
test_platform_thread thread;
m_errorMonitor->ExpectSuccess();
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME) &&
DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE3_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE3_EXTENSION_NAME);
} else {
printf("%s Descriptor Indexing or Maintenance3 Extension not supported, skipping tests\n", kSkipPrefix);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
// Create a device that enables descriptorBindingStorageBufferUpdateAfterBind
auto indexing_features = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexing_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (VK_FALSE == indexing_features.descriptorBindingStorageBufferUpdateAfterBind) {
printf("%s Test requires (unsupported) descriptorBindingStorageBufferUpdateAfterBind, skipping\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
std::array<VkDescriptorBindingFlagsEXT, 2> flags = {VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT,
VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT};
auto flags_create_info = lvl_init_struct<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>();
flags_create_info.bindingCount = (uint32_t)flags.size();
flags_create_info.pBindingFlags = flags.data();
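// Bindings created with VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT are exempt from the
// external-synchronization requirement on vkUpdateDescriptorSets, so concurrent updates from two
// threads should not be reported as a threading error.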
OneOffDescriptorSet normal_descriptor_set(m_device,
{
{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
{1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
},
VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT, &flags_create_info,
VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT);
VkBufferObj buffer;
buffer.init(*m_device, 256, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
struct thread_data_struct data;
data.device = device();
data.descriptorSet = normal_descriptor_set.set_;
data.binding = 0;
data.buffer = buffer.handle();
bool bailout = false;
data.bailout = &bailout;
m_errorMonitor->SetBailout(data.bailout);
// Update descriptors from another thread.
test_platform_thread_create(&thread, UpdateDescriptor, (void *)&data);
// Update descriptors from this thread at the same time.
struct thread_data_struct data2;
data2.device = device();
data2.descriptorSet = normal_descriptor_set.set_;
data2.binding = 1;
data2.buffer = buffer.handle();
data2.bailout = &bailout;
UpdateDescriptor(&data2);
test_platform_thread_join(thread, NULL);
m_errorMonitor->SetBailout(NULL);
m_errorMonitor->VerifyNotFound();
}
#endif // GTEST_IS_THREADSAFE
TEST_F(VkLayerTest, ExecuteUnrecordedPrimaryCB) {
TEST_DESCRIPTION("Attempt vkQueueSubmit with a CB in the initial state");
ASSERT_NO_FATAL_FAILURE(Init());
// never record m_commandBuffer
VkSubmitInfo si = {};
si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
si.commandBufferCount = 1;
si.pCommandBuffers = &m_commandBuffer->handle();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkQueueSubmit-pCommandBuffers-00072");
vk::QueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, Maintenance1AndNegativeViewport) {
TEST_DESCRIPTION("Attempt to enable AMD_negative_viewport_height and Maintenance1_KHR extension simultaneously");
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!((DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) &&
(DeviceExtensionSupported(gpu(), nullptr, VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME)))) {
printf("%s Maintenance1 and AMD_negative viewport height extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
const char *extension_names[2] = {"VK_KHR_maintenance1", "VK_AMD_negative_viewport_height"};
VkDevice testDevice;
VkDeviceCreateInfo device_create_info = {};
auto features = m_device->phy().features();
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = NULL;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = NULL;
device_create_info.enabledExtensionCount = 2;
device_create_info.ppEnabledExtensionNames = (const char *const *)extension_names;
device_create_info.pEnabledFeatures = &features;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-00374");
// The following unexpected error is coming from the LunarG loader. Do not make it a desired message because platforms that do
// not use the LunarG loader (e.g. Android) will not see the message and the test will fail.
m_errorMonitor->SetUnexpectedError("Failed to create device chain.");
vk::CreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InstanceDebugReportCallback) {
TEST_DESCRIPTION("Test that a pNext-installed debug callback will catch a CreateInstance-time error.");
// This instance extension requires that the VK_KHR_get_surface_capabilities2 also be enabled
if (!InstanceExtensionSupported(VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME);
return;
}
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCreateInstance-ppEnabledExtensionNames-01388");
// Enable the instance extension, but none of the extensions it depends on
m_instance_extension_names.push_back(VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, HostQueryResetNotEnabled) {
TEST_DESCRIPTION("Use vkResetQueryPoolEXT without enabling the feature");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitState());
auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
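// hostQueryReset was never enabled at device creation, so a host-side reset must be rejected.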
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-None-02665");
fpvkResetQueryPoolEXT(m_device->device(), query_pool, 0, 1);
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
}
TEST_F(VkLayerTest, HostQueryResetBadFirstQuery) {
TEST_DESCRIPTION("Bad firstQuery in vkResetQueryPoolEXT");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
SetTargetApiVersion(VK_API_VERSION_1_2);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset_features{};
host_query_reset_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
host_query_reset_features.hostQueryReset = VK_TRUE;
VkPhysicalDeviceFeatures2 pd_features2{};
pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
pd_features2.pNext = &host_query_reset_features;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2));
auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
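// firstQuery = 1 is out of range for a pool that holds a single query.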
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-firstQuery-02666");
fpvkResetQueryPoolEXT(m_device->device(), query_pool, 1, 0);
m_errorMonitor->VerifyFound();
if (DeviceValidationVersion() >= VK_API_VERSION_1_2) {
auto fpvkResetQueryPool = (PFN_vkResetQueryPool)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPool");
if (nullptr == fpvkResetQueryPool) {
m_errorMonitor->ExpectSuccess();
m_errorMonitor->SetError("No ProcAddr for 1.2 core vkResetQueryPool");
m_errorMonitor->VerifyNotFound();
} else {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-firstQuery-02666");
fpvkResetQueryPool(m_device->device(), query_pool, 1, 0);
m_errorMonitor->VerifyFound();
}
}
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
}
TEST_F(VkLayerTest, HostQueryResetBadRange) {
TEST_DESCRIPTION("Bad range in vkResetQueryPoolEXT");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset_features{};
host_query_reset_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
host_query_reset_features.hostQueryReset = VK_TRUE;
VkPhysicalDeviceFeatures2 pd_features2{};
pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
pd_features2.pNext = &host_query_reset_features;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2));
auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
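// firstQuery + queryCount (0 + 2) exceeds the single query in the pool.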
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-firstQuery-02667");
fpvkResetQueryPoolEXT(m_device->device(), query_pool, 0, 2);
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
}
TEST_F(VkLayerTest, HostQueryResetInvalidQueryPool) {
TEST_DESCRIPTION("Invalid queryPool in vkResetQueryPoolEXT");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset_features{};
host_query_reset_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
host_query_reset_features.hostQueryReset = VK_TRUE;
VkPhysicalDeviceFeatures2 pd_features2{};
pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
pd_features2.pNext = &host_query_reset_features;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2));
auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
// Create and destroy a query pool.
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
// Attempt to reset the now-destroyed query pool handle.
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-queryPool-parameter");
fpvkResetQueryPoolEXT(m_device->device(), query_pool, 0, 1);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, HostQueryResetWrongDevice) {
TEST_DESCRIPTION("Device not matching queryPool in vkResetQueryPoolEXT");
if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset_features{};
host_query_reset_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
host_query_reset_features.hostQueryReset = VK_TRUE;
VkPhysicalDeviceFeatures2 pd_features2{};
pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
pd_features2.pNext = &host_query_reset_features;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2));
auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
// Create a second device with the feature enabled.
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
auto features = m_device->phy().features();
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = &host_query_reset_features;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.pEnabledFeatures = &features;
device_create_info.enabledExtensionCount = m_device_extension_names.size();
device_create_info.ppEnabledExtensionNames = m_device_extension_names.data();
VkDevice second_device;
ASSERT_VK_SUCCESS(vk::CreateDevice(gpu(), &device_create_info, nullptr, &second_device));
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkResetQueryPool-queryPool-parent");
// Call vkResetQueryPoolEXT on the wrong device.
fpvkResetQueryPoolEXT(second_device, query_pool, 0, 1);
m_errorMonitor->VerifyFound();
vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
vk::DestroyDevice(second_device, nullptr);
}
TEST_F(VkLayerTest, ResetEventThenSet) {
TEST_DESCRIPTION("Reset an event then set it after the reset has been submitted.");
ASSERT_NO_FATAL_FAILURE(Init());
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vk::CreateEvent(m_device->device(), &event_create_info, nullptr, &event);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vk::CreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer;
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vk::AllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue);
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vk::BeginCommandBuffer(command_buffer, &begin_info);
vk::CmdResetEvent(command_buffer, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
vk::EndCommandBuffer(command_buffer);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
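// The submitted command buffer resets 'event' on the GPU; setting the event from the host while
// that reset is still pending is what the layer is expected to flag below.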
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "that is already in use by a command buffer.");
vk::SetEvent(m_device->device(), event);
m_errorMonitor->VerifyFound();
}
vk::QueueWaitIdle(queue);
vk::DestroyEvent(m_device->device(), event, nullptr);
vk::FreeCommandBuffers(m_device->device(), command_pool, 1, &command_buffer);
vk::DestroyCommandPool(m_device->device(), command_pool, NULL);
}
TEST_F(VkLayerTest, ShadingRateImageNV) {
TEST_DESCRIPTION("Test VK_NV_shading_rate_image.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
std::array<const char *, 1> required_device_extensions = {{VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME}};
for (auto device_extension : required_device_extensions) {
if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
m_device_extension_names.push_back(device_extension);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
return;
}
}
if (DeviceIsMockICD() || DeviceSimulation()) {
printf("%s Test not supported by MockICD, skipping tests\n", kSkipPrefix);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
// Create a device that enables shading_rate_image but disables multiViewport
auto shading_rate_image_features = lvl_init_struct<VkPhysicalDeviceShadingRateImageFeaturesNV>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&shading_rate_image_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
features2.features.multiViewport = VK_FALSE;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Test shading rate image creation
VkResult result = VK_RESULT_MAX_ENUM;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_R8_UINT;
image_create_info.extent.width = 4;
image_create_info.extent.height = 4;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV;
image_create_info.queueFamilyIndexCount = 0;
image_create_info.pQueueFamilyIndices = NULL;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
image_create_info.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
// image type must be 2D
image_create_info.imageType = VK_IMAGE_TYPE_3D;
CreateImageTest(*this, &image_create_info, "VUID-VkImageCreateInfo-imageType-02082");
image_create_info.imageType = VK_IMAGE_TYPE_2D;
// must be single sample
image_create_info.samples = VK_SAMPLE_COUNT_2_BIT;
CreateImageTest(*this, &image_create_info, "VUID-VkImageCreateInfo-samples-02083");
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
// tiling must be optimal
image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
CreateImageTest(*this, &image_create_info, "VUID-VkImageCreateInfo-tiling-02084");
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
// Should succeed.
VkImageObj image(m_device);
image.init(&image_create_info);
// Test image view creation
VkImageView view;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R8_UINT;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// view type must be 2D or 2D_ARRAY
ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-02086");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-01003");
result = vk::CreateImageView(m_device->device(), &ivci, nullptr, &view);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vk::DestroyImageView(m_device->device(), view, NULL);
view = VK_NULL_HANDLE;
}
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
// format must be R8_UINT
ivci.format = VK_FORMAT_R8_UNORM;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-02087");
result = vk::CreateImageView(m_device->device(), &ivci, nullptr, &view);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vk::DestroyImageView(m_device->device(), view, NULL);
view = VK_NULL_HANDLE;
}
ivci.format = VK_FORMAT_R8_UINT;
vk::CreateImageView(m_device->device(), &ivci, nullptr, &view);
m_errorMonitor->VerifyNotFound();
// Test pipeline creation
VkPipelineViewportShadingRateImageStateCreateInfoNV vsrisci = {
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV};
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
VkViewport viewports[20] = {viewport, viewport};
VkRect2D scissor = {{0, 0}, {64, 64}};
VkRect2D scissors[20] = {scissor, scissor};
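// The viewport/scissor arrays are intentionally oversized; the tests below use at most the first
// two entries while exercising the count-related VUIDs.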
VkDynamicState dynPalette = VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV;
VkPipelineDynamicStateCreateInfo dyn = {VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, nullptr, 0, 1, &dynPalette};
// viewportCount must be 0 or 1 when multiViewport is disabled
{
const auto break_vp = [&](CreatePipelineHelper &helper) {
helper.vp_state_ci_.viewportCount = 2;
helper.vp_state_ci_.pViewports = viewports;
helper.vp_state_ci_.scissorCount = 2;
helper.vp_state_ci_.pScissors = scissors;
helper.vp_state_ci_.pNext = &vsrisci;
helper.dyn_state_ci_ = dyn;
vsrisci.shadingRateImageEnable = VK_TRUE;
vsrisci.viewportCount = 2;
};
CreatePipelineHelper::OneshotTest(
*this, break_vp, kErrorBit,
vector<std::string>({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-viewportCount-02054",
"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}));
}
// viewportCounts must match
{
const auto break_vp = [&](CreatePipelineHelper &helper) {
helper.vp_state_ci_.viewportCount = 1;
helper.vp_state_ci_.pViewports = viewports;
helper.vp_state_ci_.scissorCount = 1;
helper.vp_state_ci_.pScissors = scissors;
helper.vp_state_ci_.pNext = &vsrisci;
helper.dyn_state_ci_ = dyn;
vsrisci.shadingRateImageEnable = VK_TRUE;
vsrisci.viewportCount = 0;
};
CreatePipelineHelper::OneshotTest(
*this, break_vp, kErrorBit,
vector<std::string>({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-shadingRateImageEnable-02056"}));
}
// pShadingRatePalettes must not be NULL.
{
const auto break_vp = [&](CreatePipelineHelper &helper) {
helper.vp_state_ci_.viewportCount = 1;
helper.vp_state_ci_.pViewports = viewports;
helper.vp_state_ci_.scissorCount = 1;
helper.vp_state_ci_.pScissors = scissors;
helper.vp_state_ci_.pNext = &vsrisci;
vsrisci.shadingRateImageEnable = VK_TRUE;
vsrisci.viewportCount = 1;
};
CreatePipelineHelper::OneshotTest(
*this, break_vp, kErrorBit,
vector<std::string>({"VUID-VkPipelineViewportShadingRateImageStateCreateInfoNV-pDynamicStates-02057"}));
}
// Create an image without the SRI bit
VkImageObj nonSRIimage(m_device);
nonSRIimage.Init(256, 256, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(nonSRIimage.initialized());
VkImageView nonSRIview = nonSRIimage.targetView(VK_FORMAT_B8G8R8A8_UNORM);
// Test SRI layout on non-SRI image
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.pNext = nullptr;
img_barrier.srcAccessMask = 0;
img_barrier.dstAccessMask = 0;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV;
img_barrier.image = nonSRIimage.handle();
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
m_commandBuffer->begin();
// Error trying to convert it to SRI layout
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageMemoryBarrier-oldLayout-02088");
vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
// succeed converting it to GENERAL
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyNotFound();
// Test vk::CmdBindShadingRateImageNV errors
auto vkCmdBindShadingRateImageNV =
(PFN_vkCmdBindShadingRateImageNV)vk::GetDeviceProcAddr(m_device->device(), "vkCmdBindShadingRateImageNV");
// if the view is non-NULL, it must be R8_UINT, USAGE_SRI, image layout must match, layout must be valid
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindShadingRateImageNV-imageView-02060");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindShadingRateImageNV-imageView-02061");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindShadingRateImageNV-imageView-02062");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063");
vkCmdBindShadingRateImageNV(m_commandBuffer->handle(), nonSRIview, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
m_errorMonitor->VerifyFound();
// Test vk::CmdSetViewportShadingRatePaletteNV errors
auto vkCmdSetViewportShadingRatePaletteNV =
(PFN_vkCmdSetViewportShadingRatePaletteNV)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetViewportShadingRatePaletteNV");
VkShadingRatePaletteEntryNV paletteEntries[100] = {};
VkShadingRatePaletteNV palette = {100, paletteEntries};
VkShadingRatePaletteNV palettes[] = {palette, palette};
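// 100 entries per palette is expected to exceed the implementation's shadingRatePaletteSize limit,
// which the palette-entry-count range check further below relies on.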
// errors on firstViewport/viewportCount
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02066");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02067");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportShadingRatePaletteNV-firstViewport-02068");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetViewportShadingRatePaletteNV-viewportCount-02069");
vkCmdSetViewportShadingRatePaletteNV(m_commandBuffer->handle(), 20, 2, palettes);
m_errorMonitor->VerifyFound();
// shadingRatePaletteEntryCount must be in range
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071");
vkCmdSetViewportShadingRatePaletteNV(m_commandBuffer->handle(), 0, 1, palettes);
m_errorMonitor->VerifyFound();
VkCoarseSampleLocationNV locations[100] = {
{0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1}, {0, 1, 1}, // duplicate
{1000, 0, 0}, // pixelX too large
{0, 1000, 0}, // pixelY too large
{0, 0, 1000}, // sample too large
};
// Test custom sample orders, both via pipeline state and via dynamic state
{
VkCoarseSampleOrderCustomNV sampOrdBadShadingRate = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV, 1, 1,
locations};
VkCoarseSampleOrderCustomNV sampOrdBadSampleCount = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 3, 1,
locations};
VkCoarseSampleOrderCustomNV sampOrdBadSampleLocationCount = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV,
2, 2, locations};
VkCoarseSampleOrderCustomNV sampOrdDuplicateLocations = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2,
1 * 2 * 2, &locations[1]};
VkCoarseSampleOrderCustomNV sampOrdOutOfRangeLocations = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2,
1 * 2 * 2, &locations[4]};
VkCoarseSampleOrderCustomNV sampOrdTooLargeSampleLocationCount = {
VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV, 4, 64, &locations[8]};
VkCoarseSampleOrderCustomNV sampOrdGood = {VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, 2, 1 * 2 * 2,
&locations[0]};
VkPipelineViewportCoarseSampleOrderStateCreateInfoNV csosci = {
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV};
csosci.sampleOrderType = VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV;
csosci.customSampleOrderCount = 1;
using std::vector;
struct TestCase {
const VkCoarseSampleOrderCustomNV *order;
vector<std::string> vuids;
};
vector<TestCase> test_cases = {
{&sampOrdBadShadingRate, {"VUID-VkCoarseSampleOrderCustomNV-shadingRate-02073"}},
{&sampOrdBadSampleCount,
{"VUID-VkCoarseSampleOrderCustomNV-sampleCount-02074", "VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02075"}},
{&sampOrdBadSampleLocationCount, {"VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02075"}},
{&sampOrdDuplicateLocations, {"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077"}},
{&sampOrdOutOfRangeLocations,
{"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077", "VUID-VkCoarseSampleLocationNV-pixelX-02078",
"VUID-VkCoarseSampleLocationNV-pixelY-02079", "VUID-VkCoarseSampleLocationNV-sample-02080"}},
{&sampOrdTooLargeSampleLocationCount,
{"VUID-VkCoarseSampleOrderCustomNV-sampleLocationCount-02076",
"VUID-VkCoarseSampleOrderCustomNV-pSampleLocations-02077"}},
{&sampOrdGood, {}},
};
for (const auto &test_case : test_cases) {
const auto break_vp = [&](CreatePipelineHelper &helper) {
helper.vp_state_ci_.pNext = &csosci;
csosci.pCustomSampleOrders = test_case.order;
};
CreatePipelineHelper::OneshotTest(*this, break_vp, kErrorBit, test_case.vuids);
}
// Test vk::CmdSetCoarseSampleOrderNV errors
auto vkCmdSetCoarseSampleOrderNV =
(PFN_vkCmdSetCoarseSampleOrderNV)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetCoarseSampleOrderNV");
for (const auto &test_case : test_cases) {
for (uint32_t i = 0; i < test_case.vuids.size(); ++i) {
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, test_case.vuids[i]);
}
vkCmdSetCoarseSampleOrderNV(m_commandBuffer->handle(), VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV, 1, test_case.order);
if (test_case.vuids.size()) {
m_errorMonitor->VerifyFound();
} else {
m_errorMonitor->VerifyNotFound();
}
}
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetCoarseSampleOrderNV-sampleOrderType-02081");
vkCmdSetCoarseSampleOrderNV(m_commandBuffer->handle(), VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV, 1, &sampOrdGood);
m_errorMonitor->VerifyFound();
}
m_commandBuffer->end();
vk::DestroyImageView(m_device->device(), view, NULL);
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "android_ndk_types.h"
TEST_F(VkLayerTest, AndroidHardwareBufferImageCreate) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer image create info.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkImage img = VK_NULL_HANDLE;
auto reset_img = [&img, dev]() {
if (VK_NULL_HANDLE != img) vk::DestroyImage(dev, img, NULL);
img = VK_NULL_HANDLE;
};
VkImageCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ici.pNext = nullptr;
ici.imageType = VK_IMAGE_TYPE_2D;
ici.arrayLayers = 1;
ici.extent = {64, 64, 1};
ici.format = VK_FORMAT_UNDEFINED;
ici.mipLevels = 1;
ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ici.samples = VK_SAMPLE_COUNT_1_BIT;
ici.tiling = VK_IMAGE_TILING_OPTIMAL;
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
// undefined format
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-01975");
m_errorMonitor->SetUnexpectedError("VUID_Undefined");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
// also undefined format
VkExternalFormatANDROID efa = {};
efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
efa.externalFormat = 0;
ici.pNext = &efa;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-01975");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
// undefined format with an unknown external format
efa.externalFormat = 0xBADC0DE;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkExternalFormatANDROID-externalFormat-01894");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
AHardwareBuffer *ahb;
AHardwareBuffer_Desc ahb_desc = {};
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.width = 64;
ahb_desc.height = 64;
ahb_desc.layers = 1;
// Allocate an AHardwareBuffer
AHardwareBuffer_allocate(&ahb_desc, &ahb);
// Retrieve its properties to make its external format 'known' (AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM)
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {};
ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = &ahb_fmt_props;
PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
(PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vk::GetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID");
ASSERT_TRUE(pfn_GetAHBProps != nullptr);
pfn_GetAHBProps(dev, ahb, &ahb_props);
// a defined image format with a non-zero external format
ici.format = VK_FORMAT_R8G8B8A8_UNORM;
efa.externalFormat = ahb_fmt_props.externalFormat;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-01974");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
ici.format = VK_FORMAT_UNDEFINED;
// external format while MUTABLE
ici.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02396");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
ici.flags = 0;
// external format while usage other than SAMPLED
ici.usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02397");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
// external format while tiling other than OPTIMAL
ici.tiling = VK_IMAGE_TILING_LINEAR;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02398");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
ici.tiling = VK_IMAGE_TILING_OPTIMAL;
// imageType
VkExternalMemoryImageCreateInfo emici = {};
emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
emici.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
ici.pNext = &emici; // remove efa from chain, insert emici
ici.format = VK_FORMAT_R8G8B8A8_UNORM;
ici.imageType = VK_IMAGE_TYPE_3D;
ici.extent = {64, 64, 64};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02393");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
// wrong mipLevels
ici.imageType = VK_IMAGE_TYPE_2D;
ici.extent = {64, 64, 1};
ici.mipLevels = 6;  // incomplete: the full mip chain for a 64x64 image has 7 levels
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageCreateInfo-pNext-02394");
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyFound();
reset_img();
}
TEST_F(VkLayerTest, AndroidHardwareBufferFetchUnboundImageInfo) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer retreive image properties while memory unbound.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkImage img = VK_NULL_HANDLE;
auto reset_img = [&img, dev]() {
if (VK_NULL_HANDLE != img) vk::DestroyImage(dev, img, NULL);
img = VK_NULL_HANDLE;
};
VkImageCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ici.pNext = nullptr;
ici.imageType = VK_IMAGE_TYPE_2D;
ici.arrayLayers = 1;
ici.extent = {64, 64, 1};
ici.format = VK_FORMAT_R8G8B8A8_UNORM;
ici.mipLevels = 1;
ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ici.samples = VK_SAMPLE_COUNT_1_BIT;
ici.tiling = VK_IMAGE_TILING_LINEAR;
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
VkExternalMemoryImageCreateInfo emici = {};
emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
emici.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
ici.pNext = &emici;
m_errorMonitor->ExpectSuccess();
vk::CreateImage(dev, &ici, NULL, &img);
m_errorMonitor->VerifyNotFound();
// attempt to fetch layout from unbound image
VkImageSubresource sub_rsrc = {};
sub_rsrc.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkSubresourceLayout sub_layout = {};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetImageSubresourceLayout-image-01895");
vk::GetImageSubresourceLayout(dev, img, &sub_rsrc, &sub_layout);
m_errorMonitor->VerifyFound();
// attempt to get memory reqs from unbound image
VkImageMemoryRequirementsInfo2 imri = {};
imri.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
imri.image = img;
VkMemoryRequirements2 mem_reqs = {};
mem_reqs.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageMemoryRequirementsInfo2-image-01897");
vk::GetImageMemoryRequirements2(dev, &imri, &mem_reqs);
m_errorMonitor->VerifyFound();
reset_img();
}
TEST_F(VkLayerTest, AndroidHardwareBufferMemoryAllocation) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer memory allocation.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkImage img = VK_NULL_HANDLE;
auto reset_img = [&img, dev]() {
if (VK_NULL_HANDLE != img) vk::DestroyImage(dev, img, NULL);
img = VK_NULL_HANDLE;
};
VkDeviceMemory mem_handle = VK_NULL_HANDLE;
auto reset_mem = [&mem_handle, dev]() {
if (VK_NULL_HANDLE != mem_handle) vk::FreeMemory(dev, mem_handle, NULL);
mem_handle = VK_NULL_HANDLE;
};
PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
(PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vk::GetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID");
ASSERT_TRUE(pfn_GetAHBProps != nullptr);
// AHB structs
AHardwareBuffer *ahb = nullptr;
AHardwareBuffer_Desc ahb_desc = {};
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {};
ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = &ahb_fmt_props;
VkImportAndroidHardwareBufferInfoANDROID iahbi = {};
iahbi.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
// destroy and re-acquire an AHB, and fetch its properties
auto recreate_ahb = [&ahb, &iahbi, &ahb_desc, &ahb_props, dev, pfn_GetAHBProps]() {
if (ahb) AHardwareBuffer_release(ahb);
ahb = nullptr;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
if (ahb) {
pfn_GetAHBProps(dev, ahb, &ahb_props);
iahbi.buffer = ahb;
}
};
// Allocate an AHardwareBuffer
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.width = 64;
ahb_desc.height = 64;
ahb_desc.layers = 1;
recreate_ahb();
// Create an image w/ external format
VkExternalFormatANDROID efa = {};
efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
efa.externalFormat = ahb_fmt_props.externalFormat;
VkImageCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ici.pNext = &efa;
ici.imageType = VK_IMAGE_TYPE_2D;
ici.arrayLayers = 1;
ici.extent = {64, 64, 1};
ici.format = VK_FORMAT_UNDEFINED;
ici.mipLevels = 1;
ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ici.samples = VK_SAMPLE_COUNT_1_BIT;
ici.tiling = VK_IMAGE_TILING_OPTIMAL;
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
VkResult res = vk::CreateImage(dev, &ici, NULL, &img);
ASSERT_VK_SUCCESS(res);
VkMemoryAllocateInfo mai = {};
mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mai.pNext = &iahbi; // Chained import struct
mai.allocationSize = ahb_props.allocationSize;
mai.memoryTypeIndex = 32;
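// 32 acts as a sentinel meaning "no matching memory type"; the loop below replaces it with the
// first type allowed by ahb_props.memoryTypeBits, and the ASSERT_NE confirms one was found.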
// Set index to match one of the bits in ahb_props
for (int i = 0; i < 32; i++) {
if (ahb_props.memoryTypeBits & (1 << i)) {
mai.memoryTypeIndex = i;
break;
}
}
ASSERT_NE(32, mai.memoryTypeIndex);
// Import w/ non-dedicated memory allocation
// Import requires format AHB_FMT_BLOB and usage AHB_USAGE_GPU_DATA_BUFFER
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02384");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
// Allocation size mismatch
ahb_desc.format = AHARDWAREBUFFER_FORMAT_BLOB;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
ahb_desc.height = 1;
recreate_ahb();
mai.allocationSize = ahb_props.allocationSize + 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-allocationSize-02383");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
mai.allocationSize = ahb_props.allocationSize;
reset_mem();
// memoryTypeIndex mismatch
mai.memoryTypeIndex++;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
mai.memoryTypeIndex--;
reset_mem();
// Insert dedicated image memory allocation to mai chain
VkMemoryDedicatedAllocateInfo mdai = {};
mdai.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
mdai.image = img;
mdai.buffer = VK_NULL_HANDLE;
mdai.pNext = mai.pNext;
mai.pNext = &mdai;
// Dedicated allocation with unmatched usage bits
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
ahb_desc.height = 64;
recreate_ahb();
mai.allocationSize = ahb_props.allocationSize;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02390");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
// Dedicated allocation with incomplete mip chain
reset_img();
ici.mipLevels = 2;
vk::CreateImage(dev, &ici, NULL, &img);
mdai.image = img;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE;
recreate_ahb();
if (ahb) {
mai.allocationSize = ahb_props.allocationSize;
for (int i = 0; i < 32; i++) {
if (ahb_props.memoryTypeBits & (1 << i)) {
mai.memoryTypeIndex = i;
break;
}
}
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02389");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
} else {
// ERROR: AHardwareBuffer_allocate() with MIPMAP_COMPLETE fails, returning -12 (NO_MEMORY).
// The problem appears on Pixel 2 but not on Pixel 3.
printf("%s AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE not supported, skipping tests\n", kSkipPrefix);
}
// Dedicated allocation with mismatched dimensions
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.height = 32;
ahb_desc.width = 128;
recreate_ahb();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02388");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
// Dedicated allocation with mismatched VkFormat
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.height = 64;
ahb_desc.width = 64;
recreate_ahb();
ici.mipLevels = 1;
ici.format = VK_FORMAT_B8G8R8A8_UNORM;
ici.pNext = NULL;
VkImage img2;
vk::CreateImage(dev, &ici, NULL, &img2);
mdai.image = img2;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02387");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
vk::DestroyImage(dev, img2, NULL);
mdai.image = img;
reset_mem();
// Missing required ahb usage
ahb_desc.usage = AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884");
recreate_ahb();
m_errorMonitor->VerifyFound();
// Dedicated allocation with missing usage bits
// Setting up this test also triggers a slew of others
mai.allocationSize = ahb_props.allocationSize + 1;
mai.memoryTypeIndex = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02390");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-allocationSize-02383");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02386");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
// Non-import allocation - replace import struct in chain with export struct
VkExportMemoryAllocateInfo emai = {};
emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
emai.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
mai.pNext = &emai;
emai.pNext = &mdai; // still dedicated
mdai.pNext = nullptr;
// Export with allocation size non-zero
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
recreate_ahb();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-01874");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
reset_mem();
AHardwareBuffer_release(ahb);
reset_mem();
reset_img();
}
TEST_F(VkLayerTest, AndroidHardwareBufferCreateYCbCrSampler) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer YCbCr sampler creation.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkSamplerYcbcrConversion ycbcr_conv = VK_NULL_HANDLE;
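// With no VkExternalFormatANDROID chained, a VK_FORMAT_UNDEFINED format should be rejected (expects the format-01904 VUID below)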
VkSamplerYcbcrConversionCreateInfo sycci = {};
sycci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
sycci.format = VK_FORMAT_UNDEFINED;
sycci.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904");
vk::CreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
m_errorMonitor->VerifyFound();
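// Conversely, with an external format chained, a defined VkFormat should also be rejected (same VUID)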
VkExternalFormatANDROID efa = {};
efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
efa.externalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM;
sycci.format = VK_FORMAT_R8G8B8A8_UNORM;
sycci.pNext = &efa;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904");
vk::CreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, AndroidHardwareBufferPhysDevImageFormatProp2) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer GetPhysicalDeviceImageFormatProperties.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping test\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
if ((m_instance_api_version < VK_API_VERSION_1_1) &&
!InstanceExtensionEnabled(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
printf("%s %s extension not supported, skipping test\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
VkImageFormatProperties2 ifp = {};
ifp.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
VkPhysicalDeviceImageFormatInfo2 pdifi = {};
pdifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
pdifi.format = VK_FORMAT_R8G8B8A8_UNORM;
pdifi.tiling = VK_IMAGE_TILING_OPTIMAL;
pdifi.type = VK_IMAGE_TYPE_2D;
pdifi.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
VkAndroidHardwareBufferUsageANDROID ahbu = {};
ahbu.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID;
ahbu.androidHardwareBufferUsage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ifp.pNext = &ahbu;
// AHB usage struct chained to the input without a matching external image format struct chained to the output
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868");
vk::GetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &pdifi, &ifp);
m_errorMonitor->VerifyFound();
// output struct chained, but does not include VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID usage
VkPhysicalDeviceExternalImageFormatInfo pdeifi = {};
pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
pdifi.pNext = &pdeifi;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868");
vk::GetPhysicalDeviceImageFormatProperties2(m_device->phy().handle(), &pdifi, &ifp);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, AndroidHardwareBufferCreateImageView) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer image view creation.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
// Allocate an AHB and fetch its properties
AHardwareBuffer *ahb = nullptr;
AHardwareBuffer_Desc ahb_desc = {};
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.width = 64;
ahb_desc.height = 64;
ahb_desc.layers = 1;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
// Retrieve AHB properties to make its external format 'known'
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props = {};
ahb_fmt_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = &ahb_fmt_props;
PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
(PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vk::GetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID");
ASSERT_TRUE(pfn_GetAHBProps != nullptr);
pfn_GetAHBProps(dev, ahb, &ahb_props);
AHardwareBuffer_release(ahb);
// Give image an external format
VkExternalFormatANDROID efa = {};
efa.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
efa.externalFormat = ahb_fmt_props.externalFormat;
ahb_desc.format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
ahb_desc.width = 64;
ahb_desc.height = 1;
ahb_desc.layers = 1;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
// Create another VkExternalFormatANDROID to test VUID-VkImageViewCreateInfo-image-02400
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_fmt_props_Ycbcr = {};
ahb_fmt_props_Ycbcr.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props_Ycbcr = {};
ahb_props_Ycbcr.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props_Ycbcr.pNext = &ahb_fmt_props_Ycbcr;
pfn_GetAHBProps(dev, ahb, &ahb_props_Ycbcr);
AHardwareBuffer_release(ahb);
VkExternalFormatANDROID efa_Ycbcr = {};
efa_Ycbcr.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID;
efa_Ycbcr.externalFormat = ahb_fmt_props_Ycbcr.externalFormat;
// Create the image
VkImage img = VK_NULL_HANDLE;
VkImageCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ici.pNext = &efa;
ici.imageType = VK_IMAGE_TYPE_2D;
ici.arrayLayers = 1;
ici.extent = {64, 64, 1};
ici.format = VK_FORMAT_UNDEFINED;
ici.mipLevels = 1;
ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ici.samples = VK_SAMPLE_COUNT_1_BIT;
ici.tiling = VK_IMAGE_TILING_OPTIMAL;
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
vk::CreateImage(dev, &ici, NULL, &img);
// Set up memory allocation
VkDeviceMemory img_mem = VK_NULL_HANDLE;
VkMemoryAllocateInfo mai = {};
mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mai.allocationSize = 64 * 64 * 4;
mai.memoryTypeIndex = 0;
vk::AllocateMemory(dev, &mai, NULL, &img_mem);
// vk::GetImageMemoryRequirements must not be used for an image backed by an AndroidHardwareBuffer.
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-vkBindImageMemory-invalid-requirements");
VkMemoryRequirements img_mem_reqs = {};
vk::GetImageMemoryRequirements(m_device->device(), img, &img_mem_reqs);
vk::BindImageMemory(dev, img, img_mem, 0);
m_errorMonitor->VerifyFound();
// Recreate the image and memory, then bind them
vk::DestroyImage(dev, img, NULL);
vk::FreeMemory(dev, img_mem, NULL);
vk::CreateImage(dev, &ici, NULL, &img);
vk::AllocateMemory(dev, &mai, NULL, &img_mem);
vk::BindImageMemory(dev, img, img_mem, 0);
// Create a YCbCr conversion, with different external format, chain to view
VkSamplerYcbcrConversion ycbcr_conv = VK_NULL_HANDLE;
VkSamplerYcbcrConversionCreateInfo sycci = {};
sycci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
sycci.pNext = &efa_Ycbcr;
sycci.format = VK_FORMAT_UNDEFINED;
sycci.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
sycci.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
vk::CreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
VkSamplerYcbcrConversionInfo syci = {};
syci.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
syci.conversion = ycbcr_conv;
// Create a view
VkImageView image_view = VK_NULL_HANDLE;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.pNext = &syci;
ivci.image = img;
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_UNDEFINED;
ivci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
auto reset_view = [&image_view, dev]() {
if (VK_NULL_HANDLE != image_view) vk::DestroyImageView(dev, image_view, NULL);
image_view = VK_NULL_HANDLE;
};
// Up to this point, no errors expected
m_errorMonitor->VerifyNotFound();
// Chained ycbcr conversion has different (external) format than image
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-02400");
// Also causes "unsupported format" - should be removed in future spec update
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-None-02273");
vk::CreateImageView(dev, &ivci, NULL, &image_view);
m_errorMonitor->VerifyFound();
reset_view();
vk::DestroySamplerYcbcrConversion(dev, ycbcr_conv, NULL);
sycci.pNext = &efa;
vk::CreateSamplerYcbcrConversion(dev, &sycci, NULL, &ycbcr_conv);
syci.conversion = ycbcr_conv;
// View component swizzle not IDENTITY
ivci.components.r = VK_COMPONENT_SWIZZLE_B;
ivci.components.b = VK_COMPONENT_SWIZZLE_R;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-02401");
// Also causes "unsupported format" - should be removed in future spec update
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-None-02273");
vk::CreateImageView(dev, &ivci, NULL, &image_view);
m_errorMonitor->VerifyFound();
reset_view();
ivci.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
ivci.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
// View with external format, when format is not UNDEFINED
ivci.format = VK_FORMAT_R5G6B5_UNORM_PACK16;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-02399");
// Also causes "view format different from image format"
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImageViewCreateInfo-image-01019");
vk::CreateImageView(dev, &ivci, NULL, &image_view);
m_errorMonitor->VerifyFound();
reset_view();
vk::DestroySamplerYcbcrConversion(dev, ycbcr_conv, NULL);
vk::DestroyImageView(dev, image_view, NULL);
vk::DestroyImage(dev, img, NULL);
vk::FreeMemory(dev, img_mem, NULL);
}
TEST_F(VkLayerTest, AndroidHardwareBufferImportBuffer) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer import as buffer.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkDeviceMemory mem_handle = VK_NULL_HANDLE;
auto reset_mem = [&mem_handle, dev]() {
if (VK_NULL_HANDLE != mem_handle) vk::FreeMemory(dev, mem_handle, NULL);
mem_handle = VK_NULL_HANDLE;
};
PFN_vkGetAndroidHardwareBufferPropertiesANDROID pfn_GetAHBProps =
(PFN_vkGetAndroidHardwareBufferPropertiesANDROID)vk::GetDeviceProcAddr(dev, "vkGetAndroidHardwareBufferPropertiesANDROID");
ASSERT_TRUE(pfn_GetAHBProps != nullptr);
// AHB structs
AHardwareBuffer *ahb = nullptr;
AHardwareBuffer_Desc ahb_desc = {};
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
VkImportAndroidHardwareBufferInfoANDROID iahbi = {};
iahbi.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
// Allocate an AHardwareBuffer
ahb_desc.format = AHARDWAREBUFFER_FORMAT_BLOB;
ahb_desc.usage = AHARDWAREBUFFER_USAGE_SENSOR_DIRECT_DATA;
ahb_desc.width = 512;
ahb_desc.height = 1;
ahb_desc.layers = 1;
AHardwareBuffer_allocate(&ahb_desc, &ahb);
m_errorMonitor->SetUnexpectedError("VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884");
pfn_GetAHBProps(dev, ahb, &ahb_props);
iahbi.buffer = ahb;
// Create export and import buffers
VkExternalMemoryBufferCreateInfo ext_buf_info = {};
ext_buf_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR;
ext_buf_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
VkBufferCreateInfo bci = {};
bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bci.pNext = &ext_buf_info;
bci.size = ahb_props.allocationSize;
bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
VkBuffer buf = VK_NULL_HANDLE;
vk::CreateBuffer(dev, &bci, NULL, &buf);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(dev, buf, &mem_reqs);
// Allocation info
VkMemoryAllocateInfo mai = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, mem_reqs, 0);
mai.pNext = &iahbi; // Chained import struct
VkPhysicalDeviceMemoryProperties memory_info;
vk::GetPhysicalDeviceMemoryProperties(gpu(), &memory_info);
unsigned int i;
for (i = 0; i < memory_info.memoryTypeCount; i++) {
if ((ahb_props.memoryTypeBits & (1 << i))) {
mai.memoryTypeIndex = i;
break;
}
}
if (i >= memory_info.memoryTypeCount) {
printf("%s No invalid memory type index could be found; skipped.\n", kSkipPrefix);
AHardwareBuffer_release(ahb);
reset_mem();
vk::DestroyBuffer(dev, buf, NULL);
return;
}
// Import as buffer requires format AHB_FMT_BLOB and usage AHB_USAGE_GPU_DATA_BUFFER
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881");
// Also causes "non-dedicated allocation format/usage" error
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryAllocateInfo-pNext-02384");
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
m_errorMonitor->VerifyFound();
AHardwareBuffer_release(ahb);
reset_mem();
vk::DestroyBuffer(dev, buf, NULL);
}
TEST_F(VkLayerTest, AndroidHardwareBufferExportBuffer) {
TEST_DESCRIPTION("Verify AndroidHardwareBuffer export memory as AHB.");
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if ((DeviceExtensionSupported(gpu(), nullptr, VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) &&
// Also skip on devices that advertise AHB, but not the pre-requisite foreign_queue extension
(DeviceExtensionSupported(gpu(), nullptr, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME))) {
m_device_extension_names.push_back(VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
m_device_extension_names.push_back(VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME);
} else {
printf("%s %s extension not supported, skipping tests\n", kSkipPrefix,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDevice dev = m_device->device();
VkDeviceMemory mem_handle = VK_NULL_HANDLE;
// Allocate device memory, no linked export struct indicating AHB handle type
VkMemoryAllocateInfo mai = {};
mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mai.allocationSize = 65536;
mai.memoryTypeIndex = 0;
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
PFN_vkGetMemoryAndroidHardwareBufferANDROID pfn_GetMemAHB =
(PFN_vkGetMemoryAndroidHardwareBufferANDROID)vk::GetDeviceProcAddr(dev, "vkGetMemoryAndroidHardwareBufferANDROID");
ASSERT_TRUE(pfn_GetMemAHB != nullptr);
VkMemoryGetAndroidHardwareBufferInfoANDROID mgahbi = {};
mgahbi.sType = VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
mgahbi.memory = mem_handle;
AHardwareBuffer *ahb = nullptr;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882");
pfn_GetMemAHB(dev, &mgahbi, &ahb);
m_errorMonitor->VerifyFound();
if (ahb) AHardwareBuffer_release(ahb);
ahb = nullptr;
if (VK_NULL_HANDLE != mem_handle) vk::FreeMemory(dev, mem_handle, NULL);
mem_handle = VK_NULL_HANDLE;
// Add an export struct with AHB handle type to allocation info
VkExportMemoryAllocateInfo emai = {};
emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
emai.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
mai.pNext = &emai;
// Create an image, do not bind memory
VkImage img = VK_NULL_HANDLE;
VkImageCreateInfo ici = {};
ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ici.imageType = VK_IMAGE_TYPE_2D;
ici.arrayLayers = 1;
ici.extent = {128, 128, 1};
ici.format = VK_FORMAT_R8G8B8A8_UNORM;
ici.mipLevels = 1;
ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ici.samples = VK_SAMPLE_COUNT_1_BIT;
ici.tiling = VK_IMAGE_TILING_OPTIMAL;
ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
vk::CreateImage(dev, &ici, NULL, &img);
ASSERT_TRUE(VK_NULL_HANDLE != img);
// Add image to allocation chain as dedicated info, re-allocate
VkMemoryDedicatedAllocateInfo mdai = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO};
mdai.image = img;
emai.pNext = &mdai;
mai.allocationSize = 0;
vk::AllocateMemory(dev, &mai, NULL, &mem_handle);
mgahbi.memory = mem_handle;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883");
pfn_GetMemAHB(dev, &mgahbi, &ahb);
m_errorMonitor->VerifyFound();
if (ahb) AHardwareBuffer_release(ahb);
if (VK_NULL_HANDLE != mem_handle) vk::FreeMemory(dev, mem_handle, NULL);
vk::DestroyImage(dev, img, NULL);
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
TEST_F(VkLayerTest, ValidateStride) {
TEST_DESCRIPTION("Validate Stride.");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_ci.queryCount = 1;
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_commandBuffer->begin();
vk::CmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
vk::CmdWriteTimestamp(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, query_pool, 0);
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(m_device->m_queue);
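// A stride of 1 byte should fail for both the default (32-bit) and VK_QUERY_RESULT_64_BIT result layouts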
char data_space;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-flags-02827");
vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space), &data_space, 1, VK_QUERY_RESULT_WAIT_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-flags-00815");
vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space), &data_space, 1,
(VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT));
m_errorMonitor->VerifyFound();
char data_space4[4] = "";
m_errorMonitor->ExpectSuccess();
vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space4), &data_space4, 4, VK_QUERY_RESULT_WAIT_BIT);
m_errorMonitor->VerifyNotFound();
char data_space8[8] = "";
m_errorMonitor->ExpectSuccess();
vk::GetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space8), &data_space8, 8,
(VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT));
m_errorMonitor->VerifyNotFound();
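// Repeat the stride checks for vkCmdCopyQueryPoolResults with a buffer destination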
uint32_t qfi = 0;
VkBufferCreateInfo buff_create_info = {};
buff_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buff_create_info.size = 128;
buff_create_info.usage =
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
buff_create_info.queueFamilyIndexCount = 1;
buff_create_info.pQueueFamilyIndices = &qfi;
VkBufferObj buffer;
buffer.init(*m_device, buff_create_info);
m_commandBuffer->reset();
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyQueryPoolResults-flags-00822");
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 1, buffer.handle(), 1, 1, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyQueryPoolResults-flags-00823");
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 1, buffer.handle(), 1, 1, VK_QUERY_RESULT_64_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->ExpectSuccess();
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 1, buffer.handle(), 4, 4, 0);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess();
vk::CmdCopyQueryPoolResults(m_commandBuffer->handle(), query_pool, 0, 1, buffer.handle(), 8, 8, VK_QUERY_RESULT_64_BIT);
m_errorMonitor->VerifyNotFound();
if (m_device->phy().features().multiDrawIndirect) {
CreatePipelineHelper helper(*this);
helper.InitInfo();
helper.InitState();
helper.CreateGraphicsPipeline();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, helper.pipeline_);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndirect-drawCount-00476");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndirect-drawCount-00488");
vk::CmdDrawIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 100, 2);
m_errorMonitor->VerifyFound();
m_errorMonitor->ExpectSuccess();
vk::CmdDrawIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 2, 24);
m_errorMonitor->VerifyNotFound();
vk::CmdBindIndexBuffer(m_commandBuffer->handle(), buffer.handle(), 0, VK_INDEX_TYPE_UINT16);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndexedIndirect-drawCount-00528");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndexedIndirect-drawCount-00540");
vk::CmdDrawIndexedIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 100, 2);
m_errorMonitor->VerifyFound();
m_errorMonitor->ExpectSuccess();
vk::CmdDrawIndexedIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 2, 24);
m_errorMonitor->VerifyNotFound();
vk::CmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
} else {
printf("%s Test requires unsupported multiDrawIndirect feature. Skipped.\n", kSkipPrefix);
}
vk::DestroyQueryPool(m_device->handle(), query_pool, NULL);
}
TEST_F(VkLayerTest, WarningSwapchainCreateInfoPreTransform) {
TEST_DESCRIPTION("Print warning when preTransform doesn't match curretTransform");
if (!AddSurfaceInstanceExtension()) {
printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (!AddSwapchainDeviceExtension()) {
printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->SetDesiredFailureMsg(kPerformanceWarningBit, "UNASSIGNED-CoreValidation-SwapchainPreTransform");
m_errorMonitor->SetUnexpectedError("VUID-VkSwapchainCreateInfoKHR-preTransform-01279");
InitSwapchain(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR);
m_errorMonitor->VerifyFound();
DestroySwapchain();
}
bool InitFrameworkForRayTracingTest(VkRenderFramework *renderFramework, std::vector<const char *> &instance_extension_names,
std::vector<const char *> &device_extension_names, void *user_data,
bool need_gpu_validation = false, bool need_push_descriptors = false) {
const std::array<const char *, 1> required_instance_extensions = {VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME};
for (const char *required_instance_extension : required_instance_extensions) {
if (renderFramework->InstanceExtensionSupported(required_instance_extension)) {
instance_extension_names.push_back(required_instance_extension);
} else {
printf("%s %s instance extension not supported, skipping test\n", kSkipPrefix, required_instance_extension);
return false;
}
}
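// Optionally enable GPU-assisted validation through VK_EXT_validation_features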
VkValidationFeatureEnableEXT enables[] = {VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT};
VkValidationFeaturesEXT features = {};
features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT;
features.enabledValidationFeatureCount = 1;
features.pEnabledValidationFeatures = enables;
VkValidationFeaturesEXT *enabled_features = need_gpu_validation ? &features : nullptr;
renderFramework->InitFramework(user_data, enabled_features);
if (renderFramework->DeviceIsMockICD() || renderFramework->DeviceSimulation()) {
printf("%s Test not supported by MockICD, skipping tests\n", kSkipPrefix);
return false;
}
std::vector<const char *> required_device_extensions = {
VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
VK_NV_RAY_TRACING_EXTENSION_NAME,
};
if (need_push_descriptors) {
required_device_extensions.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
}
for (const char *required_device_extension : required_device_extensions) {
if (renderFramework->DeviceExtensionSupported(renderFramework->gpu(), nullptr, required_device_extension)) {
device_extension_names.push_back(required_device_extension);
} else {
printf("%s %s device extension not supported, skipping test\n", kSkipPrefix, required_device_extension);
return false;
}
}
renderFramework->InitState();
return true;
}
TEST_F(VkLayerTest, ValidateGeometryNV) {
TEST_DESCRIPTION("Validate acceleration structure geometries.");
if (!InitFrameworkForRayTracingTest(this, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
VkBufferObj vbo;
vbo.init(*m_device, 1024, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
VkBufferObj ibo;
ibo.init(*m_device, 1024, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
VkBufferObj tbo;
tbo.init(*m_device, 1024, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
VkBufferObj aabbbo;
aabbbo.init(*m_device, 1024, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
VkBufferCreateInfo unbound_buffer_ci = {};
unbound_buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
unbound_buffer_ci.size = 1024;
unbound_buffer_ci.usage = VK_BUFFER_USAGE_RAY_TRACING_BIT_NV;
VkBufferObj unbound_buffer;
unbound_buffer.init_no_mem(*m_device, unbound_buffer_ci);
const std::vector<float> vertices = {1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 0.0f};
const std::vector<uint32_t> indices = {0, 1, 2};
const std::vector<float> aabbs = {0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f};
const std::vector<float> transforms = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f};
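// Copy the test data into the host-visible buffers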
uint8_t *mapped_vbo_buffer_data = (uint8_t *)vbo.memory().map();
std::memcpy(mapped_vbo_buffer_data, (uint8_t *)vertices.data(), sizeof(float) * vertices.size());
vbo.memory().unmap();
uint8_t *mapped_ibo_buffer_data = (uint8_t *)ibo.memory().map();
std::memcpy(mapped_ibo_buffer_data, (uint8_t *)indices.data(), sizeof(uint32_t) * indices.size());
ibo.memory().unmap();
uint8_t *mapped_tbo_buffer_data = (uint8_t *)tbo.memory().map();
std::memcpy(mapped_tbo_buffer_data, (uint8_t *)transforms.data(), sizeof(float) * transforms.size());
tbo.memory().unmap();
uint8_t *mapped_aabbbo_buffer_data = (uint8_t *)aabbbo.memory().map();
std::memcpy(mapped_aabbbo_buffer_data, (uint8_t *)aabbs.data(), sizeof(float) * aabbs.size());
aabbbo.memory().unmap();
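// Build a known-good triangles geometry; each negative case below perturbs a single field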
VkGeometryNV valid_geometry_triangles = {};
valid_geometry_triangles.sType = VK_STRUCTURE_TYPE_GEOMETRY_NV;
valid_geometry_triangles.geometryType = VK_GEOMETRY_TYPE_TRIANGLES_NV;
valid_geometry_triangles.geometry.triangles.sType = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV;
valid_geometry_triangles.geometry.triangles.vertexData = vbo.handle();
valid_geometry_triangles.geometry.triangles.vertexOffset = 0;
valid_geometry_triangles.geometry.triangles.vertexCount = 3;
valid_geometry_triangles.geometry.triangles.vertexStride = 12;
valid_geometry_triangles.geometry.triangles.vertexFormat = VK_FORMAT_R32G32B32_SFLOAT;
valid_geometry_triangles.geometry.triangles.indexData = ibo.handle();
valid_geometry_triangles.geometry.triangles.indexOffset = 0;
valid_geometry_triangles.geometry.triangles.indexCount = 3;
valid_geometry_triangles.geometry.triangles.indexType = VK_INDEX_TYPE_UINT32;
valid_geometry_triangles.geometry.triangles.transformData = tbo.handle();
valid_geometry_triangles.geometry.triangles.transformOffset = 0;
valid_geometry_triangles.geometry.aabbs = {};
valid_geometry_triangles.geometry.aabbs.sType = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV;
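// Build a known-good AABB geometry for the AABB-specific cases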
VkGeometryNV valid_geometry_aabbs = {};
valid_geometry_aabbs.sType = VK_STRUCTURE_TYPE_GEOMETRY_NV;
valid_geometry_aabbs.geometryType = VK_GEOMETRY_TYPE_AABBS_NV;
valid_geometry_aabbs.geometry.triangles = {};
valid_geometry_aabbs.geometry.triangles.sType = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV;
valid_geometry_aabbs.geometry.aabbs = {};
valid_geometry_aabbs.geometry.aabbs.sType = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV;
valid_geometry_aabbs.geometry.aabbs.aabbData = aabbbo.handle();
valid_geometry_aabbs.geometry.aabbs.numAABBs = 1;
valid_geometry_aabbs.geometry.aabbs.offset = 0;
valid_geometry_aabbs.geometry.aabbs.stride = 24;
PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV = reinterpret_cast<PFN_vkCreateAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCreateAccelerationStructureNV"));
assert(vkCreateAccelerationStructureNV != nullptr);
const auto GetCreateInfo = [](const VkGeometryNV &geometry) {
VkAccelerationStructureCreateInfoNV as_create_info = {};
as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
as_create_info.info.instanceCount = 0;
as_create_info.info.geometryCount = 1;
as_create_info.info.pGeometries = &geometry;
return as_create_info;
};
VkAccelerationStructureNV as;
// Invalid vertex format.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.vertexFormat = VK_FORMAT_R64_UINT;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-vertexFormat-02430");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid vertex offset - not multiple of component size.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.vertexOffset = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-vertexOffset-02429");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid vertex offset - bigger than buffer.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.vertexOffset = 12 * 1024;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-vertexOffset-02428");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid vertex buffer - no such buffer.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.vertexData = VkBuffer(123456789);
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-vertexData-parameter");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid vertex buffer - no memory bound.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.vertexData = unbound_buffer.handle();
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-vertexOffset-02428");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid index offset - not multiple of index size.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.indexOffset = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-indexOffset-02432");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid index offset - bigger than buffer.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.indexOffset = 2048;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-indexOffset-02431");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid index count - must be 0 if type is VK_INDEX_TYPE_NONE_NV.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.indexType = VK_INDEX_TYPE_NONE_NV;
geometry.geometry.triangles.indexData = VK_NULL_HANDLE;
geometry.geometry.triangles.indexCount = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-indexCount-02436");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid index data - must be VK_NULL_HANDLE if type is VK_INDEX_TYPE_NONE_NV.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.indexType = VK_INDEX_TYPE_NONE_NV;
geometry.geometry.triangles.indexData = ibo.handle();
geometry.geometry.triangles.indexCount = 0;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-indexData-02434");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid transform offset - not multiple of 16.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.transformOffset = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-transformOffset-02438");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid transform offset - bigger than buffer.
{
VkGeometryNV geometry = valid_geometry_triangles;
geometry.geometry.triangles.transformOffset = 2048;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryTrianglesNV-transformOffset-02437");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid aabb offset - not multiple of 8.
{
VkGeometryNV geometry = valid_geometry_aabbs;
geometry.geometry.aabbs.offset = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryAABBNV-offset-02440");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid aabb offset - bigger than buffer.
{
VkGeometryNV geometry = valid_geometry_aabbs;
geometry.geometry.aabbs.offset = 8 * 1024;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryAABBNV-offset-02439");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Invalid aabb stride - not multiple of 8.
{
VkGeometryNV geometry = valid_geometry_aabbs;
geometry.geometry.aabbs.stride = 1;
VkAccelerationStructureCreateInfoNV as_create_info = GetCreateInfo(geometry);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGeometryAABBNV-stride-02441");
vkCreateAccelerationStructureNV(m_device->handle(), &as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
}
void GetSimpleGeometryForAccelerationStructureTests(const VkDeviceObj &device, VkBufferObj *vbo, VkBufferObj *ibo,
VkGeometryNV *geometry) {
vbo->init(device, 1024, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
ibo->init(device, 1024, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
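// Upload a single triangle's vertices and indices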
const std::vector<float> vertices = {1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 0.0f};
const std::vector<uint32_t> indices = {0, 1, 2};
uint8_t *mapped_vbo_buffer_data = (uint8_t *)vbo->memory().map();
std::memcpy(mapped_vbo_buffer_data, (uint8_t *)vertices.data(), sizeof(float) * vertices.size());
vbo->memory().unmap();
uint8_t *mapped_ibo_buffer_data = (uint8_t *)ibo->memory().map();
std::memcpy(mapped_ibo_buffer_data, (uint8_t *)indices.data(), sizeof(uint32_t) * indices.size());
ibo->memory().unmap();
*geometry = {};
geometry->sType = VK_STRUCTURE_TYPE_GEOMETRY_NV;
geometry->geometryType = VK_GEOMETRY_TYPE_TRIANGLES_NV;
geometry->geometry.triangles.sType = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV;
geometry->geometry.triangles.vertexData = vbo->handle();
geometry->geometry.triangles.vertexOffset = 0;
geometry->geometry.triangles.vertexCount = 3;
geometry->geometry.triangles.vertexStride = 12;
geometry->geometry.triangles.vertexFormat = VK_FORMAT_R32G32B32_SFLOAT;
geometry->geometry.triangles.indexData = ibo->handle();
geometry->geometry.triangles.indexOffset = 0;
geometry->geometry.triangles.indexCount = 3;
geometry->geometry.triangles.indexType = VK_INDEX_TYPE_UINT32;
geometry->geometry.triangles.transformData = VK_NULL_HANDLE;
geometry->geometry.triangles.transformOffset = 0;
geometry->geometry.aabbs = {};
geometry->geometry.aabbs.sType = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV;
}
TEST_F(VkLayerTest, ValidateCreateAccelerationStructureNV) {
TEST_DESCRIPTION("Validate acceleration structure creation.");
if (!InitFrameworkForRayTracingTest(this, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV = reinterpret_cast<PFN_vkCreateAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCreateAccelerationStructureNV"));
assert(vkCreateAccelerationStructureNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV as_create_info = {};
as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
VkAccelerationStructureNV as = VK_NULL_HANDLE;
// Top level cannot have geometry
{
VkAccelerationStructureCreateInfoNV bad_top_level_create_info = as_create_info;
bad_top_level_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
bad_top_level_create_info.info.instanceCount = 0;
bad_top_level_create_info.info.geometryCount = 1;
bad_top_level_create_info.info.pGeometries = &geometry;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureInfoNV-type-02425");
vkCreateAccelerationStructureNV(m_device->handle(), &bad_top_level_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Bottom level cannot have instances
{
VkAccelerationStructureCreateInfoNV bad_bot_level_create_info = as_create_info;
bad_bot_level_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bad_bot_level_create_info.info.instanceCount = 1;
bad_bot_level_create_info.info.geometryCount = 0;
bad_bot_level_create_info.info.pGeometries = nullptr;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureInfoNV-type-02426");
vkCreateAccelerationStructureNV(m_device->handle(), &bad_bot_level_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Can not prefer both fast trace and fast build
{
VkAccelerationStructureCreateInfoNV bad_flags_level_create_info = as_create_info;
bad_flags_level_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bad_flags_level_create_info.info.instanceCount = 0;
bad_flags_level_create_info.info.geometryCount = 1;
bad_flags_level_create_info.info.pGeometries = &geometry;
bad_flags_level_create_info.info.flags =
VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV | VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureInfoNV-flags-02592");
vkCreateAccelerationStructureNV(m_device->handle(), &bad_flags_level_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Can not have geometry or instance for compacting
{
VkAccelerationStructureCreateInfoNV bad_compacting_as_create_info = as_create_info;
bad_compacting_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bad_compacting_as_create_info.info.instanceCount = 0;
bad_compacting_as_create_info.info.geometryCount = 1;
bad_compacting_as_create_info.info.pGeometries = &geometry;
bad_compacting_as_create_info.info.flags = 0;
bad_compacting_as_create_info.compactedSize = 1024;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkAccelerationStructureCreateInfoNV-compactedSize-02421");
vkCreateAccelerationStructureNV(m_device->handle(), &bad_compacting_as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
// Can not mix different geometry types into single bottom level acceleration structure
{
VkGeometryNV aabb_geometry = {};
aabb_geometry.sType = VK_STRUCTURE_TYPE_GEOMETRY_NV;
aabb_geometry.geometryType = VK_GEOMETRY_TYPE_AABBS_NV;
aabb_geometry.geometry.triangles.sType = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV;
aabb_geometry.geometry.aabbs = {};
aabb_geometry.geometry.aabbs.sType = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV;
// Buffer contents do not matter for this test.
aabb_geometry.geometry.aabbs.aabbData = geometry.geometry.triangles.vertexData;
aabb_geometry.geometry.aabbs.numAABBs = 1;
aabb_geometry.geometry.aabbs.offset = 0;
aabb_geometry.geometry.aabbs.stride = 24;
std::vector<VkGeometryNV> geometries = {geometry, aabb_geometry};
VkAccelerationStructureCreateInfoNV mix_geometry_types_as_create_info = as_create_info;
mix_geometry_types_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
mix_geometry_types_as_create_info.info.instanceCount = 0;
mix_geometry_types_as_create_info.info.geometryCount = static_cast<uint32_t>(geometries.size());
mix_geometry_types_as_create_info.info.pGeometries = geometries.data();
mix_geometry_types_as_create_info.info.flags = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-VkAccelerationStructureInfoNV-pGeometries-XXXX");
vkCreateAccelerationStructureNV(m_device->handle(), &mix_geometry_types_as_create_info, nullptr, &as);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ValidateBindAccelerationStructureNV) {
TEST_DESCRIPTION("Validate acceleration structure binding.");
if (!InitFrameworkForRayTracingTest(this, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV =
reinterpret_cast<PFN_vkBindAccelerationStructureMemoryNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkBindAccelerationStructureMemoryNV"));
assert(vkBindAccelerationStructureMemoryNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV as_create_info = {};
as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
as_create_info.info.geometryCount = 1;
as_create_info.info.pGeometries = &geometry;
as_create_info.info.instanceCount = 0;
VkAccelerationStructureObj as(*m_device, as_create_info, false);
m_errorMonitor->VerifyNotFound();
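// Query memory requirements and build an allocation/bind template reused by the negative cases below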
VkMemoryRequirements as_memory_requirements = as.memory_requirements().memoryRequirements;
VkBindAccelerationStructureMemoryInfoNV as_bind_info = {};
as_bind_info.sType = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV;
as_bind_info.accelerationStructure = as.handle();
VkMemoryAllocateInfo as_memory_alloc = {};
as_memory_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
as_memory_alloc.allocationSize = as_memory_requirements.size;
ASSERT_TRUE(m_device->phy().set_memory_type(as_memory_requirements.memoryTypeBits, &as_memory_alloc, 0));
// Can not bind already freed memory
{
VkDeviceMemory as_memory_freed = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc, NULL, &as_memory_freed));
vk::FreeMemory(device(), as_memory_freed, NULL);
VkBindAccelerationStructureMemoryInfoNV as_bind_info_freed = as_bind_info;
as_bind_info_freed.memory = as_memory_freed;
as_bind_info_freed.memoryOffset = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoNV-memory-parameter");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_freed);
m_errorMonitor->VerifyFound();
}
// Can not bind with bad alignment
if (as_memory_requirements.alignment > 1) {
VkMemoryAllocateInfo as_memory_alloc_bad_alignment = as_memory_alloc;
as_memory_alloc_bad_alignment.allocationSize += 1;
VkDeviceMemory as_memory_bad_alignment = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc_bad_alignment, NULL, &as_memory_bad_alignment));
VkBindAccelerationStructureMemoryInfoNV as_bind_info_bad_alignment = as_bind_info;
as_bind_info_bad_alignment.memory = as_memory_bad_alignment;
as_bind_info_bad_alignment.memoryOffset = 1;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-02594");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_bad_alignment);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), as_memory_bad_alignment, NULL);
}
// Can not bind with offset outside the allocation
{
VkDeviceMemory as_memory_bad_offset = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc, NULL, &as_memory_bad_offset));
VkBindAccelerationStructureMemoryInfoNV as_bind_info_bad_offset = as_bind_info;
as_bind_info_bad_offset.memory = as_memory_bad_offset;
as_bind_info_bad_offset.memoryOffset =
(as_memory_alloc.allocationSize + as_memory_requirements.alignment) & ~(as_memory_requirements.alignment - 1);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-02451");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_bad_offset);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), as_memory_bad_offset, NULL);
}
// Can not bind with offset that doesn't leave enough size
{
VkDeviceSize offset = (as_memory_requirements.size - 1) & ~(as_memory_requirements.alignment - 1);
if (offset > 0 && (as_memory_requirements.size < (as_memory_alloc.allocationSize - as_memory_requirements.alignment))) {
VkDeviceMemory as_memory_bad_offset = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc, NULL, &as_memory_bad_offset));
VkBindAccelerationStructureMemoryInfoNV as_bind_info_bad_offset = as_bind_info;
as_bind_info_bad_offset.memory = as_memory_bad_offset;
as_bind_info_bad_offset.memoryOffset = offset;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoNV-size-02595");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_bad_offset);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), as_memory_bad_offset, NULL);
}
}
// Can not bind with memory that has unsupported memory type
{
VkPhysicalDeviceMemoryProperties memory_properties = {};
vk::GetPhysicalDeviceMemoryProperties(m_device->phy().handle(), &memory_properties);
uint32_t supported_memory_type_bits = as_memory_requirements.memoryTypeBits;
uint32_t unsupported_mem_type_bits = ((1 << memory_properties.memoryTypeCount) - 1) & ~supported_memory_type_bits;
if (unsupported_mem_type_bits != 0) {
VkMemoryAllocateInfo as_memory_alloc_bad_type = as_memory_alloc;
ASSERT_TRUE(m_device->phy().set_memory_type(unsupported_mem_type_bits, &as_memory_alloc_bad_type, 0));
VkDeviceMemory as_memory_bad_type = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc_bad_type, NULL, &as_memory_bad_type));
VkBindAccelerationStructureMemoryInfoNV as_bind_info_bad_type = as_bind_info;
as_bind_info_bad_type.memory = as_memory_bad_type;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoNV-memory-02593");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_bad_type);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), as_memory_bad_type, NULL);
}
}
// Can not bind memory twice
{
VkAccelerationStructureObj as_twice(*m_device, as_create_info, false);
VkDeviceMemory as_memory_twice_1 = VK_NULL_HANDLE;
VkDeviceMemory as_memory_twice_2 = VK_NULL_HANDLE;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc, NULL, &as_memory_twice_1));
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &as_memory_alloc, NULL, &as_memory_twice_2));
VkBindAccelerationStructureMemoryInfoNV as_bind_info_twice_1 = as_bind_info;
VkBindAccelerationStructureMemoryInfoNV as_bind_info_twice_2 = as_bind_info;
as_bind_info_twice_1.accelerationStructure = as_twice.handle();
as_bind_info_twice_2.accelerationStructure = as_twice.handle();
as_bind_info_twice_1.memory = as_memory_twice_1;
as_bind_info_twice_2.memory = as_memory_twice_2;
ASSERT_VK_SUCCESS(vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_twice_1));
m_errorMonitor->VerifyNotFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkBindAccelerationStructureMemoryInfoNV-accelerationStructure-02450");
(void)vkBindAccelerationStructureMemoryNV(device(), 1, &as_bind_info_twice_2);
m_errorMonitor->VerifyFound();
vk::FreeMemory(device(), as_memory_twice_1, NULL);
vk::FreeMemory(device(), as_memory_twice_2, NULL);
}
}
TEST_F(VkLayerTest, ValidateCmdBuildAccelerationStructureNV) {
TEST_DESCRIPTION("Validate acceleration structure building.");
if (!InitFrameworkForRayTracingTest(this, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV =
reinterpret_cast<PFN_vkCmdBuildAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCmdBuildAccelerationStructureNV"));
assert(vkCmdBuildAccelerationStructureNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV bot_level_as_create_info = {};
bot_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
bot_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
bot_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bot_level_as_create_info.info.instanceCount = 0;
bot_level_as_create_info.info.geometryCount = 1;
bot_level_as_create_info.info.pGeometries = &geometry;
VkAccelerationStructureObj bot_level_as(*m_device, bot_level_as_create_info);
m_errorMonitor->VerifyNotFound();
VkBufferObj bot_level_as_scratch;
bot_level_as.create_scratch_buffer(*m_device, &bot_level_as_scratch);
// Command buffer must be in recording state
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-commandBuffer-recording");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->begin();
// Incompatible type
VkAccelerationStructureInfoNV as_build_info_with_incompatible_type = bot_level_as_create_info.info;
as_build_info_with_incompatible_type.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
as_build_info_with_incompatible_type.instanceCount = 1;
as_build_info_with_incompatible_type.geometryCount = 0;
// The same message is expected twice: the mismatched type and the smaller instance count each trigger this VUID, while the
// build info remains valid enough to get past the stateless checks.
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &as_build_info_with_incompatible_type, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// Incompatible flags
VkAccelerationStructureInfoNV as_build_info_with_incompatible_flags = bot_level_as_create_info.info;
as_build_info_with_incompatible_flags.flags = VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &as_build_info_with_incompatible_flags, VK_NULL_HANDLE, 0,
VK_FALSE, bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// Incompatible build size
VkGeometryNV geometry_with_more_vertices = geometry;
geometry_with_more_vertices.geometry.triangles.vertexCount += 1;
VkAccelerationStructureInfoNV as_build_info_with_incompatible_geometry = bot_level_as_create_info.info;
as_build_info_with_incompatible_geometry.pGeometries = &geometry_with_more_vertices;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &as_build_info_with_incompatible_geometry, VK_NULL_HANDLE, 0,
VK_FALSE, bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// Scratch buffer too small
VkBufferCreateInfo too_small_scratch_buffer_info = {};
too_small_scratch_buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
too_small_scratch_buffer_info.usage = VK_BUFFER_USAGE_RAY_TRACING_BIT_NV;
too_small_scratch_buffer_info.size = 1;
VkBufferObj too_small_scratch_buffer(*m_device, too_small_scratch_buffer_info);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-update-02491");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, too_small_scratch_buffer.handle(), 0);
m_errorMonitor->VerifyFound();
// Scratch buffer with offset too small
VkDeviceSize scratch_buffer_offset = 5;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-update-02491");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), scratch_buffer_offset);
m_errorMonitor->VerifyFound();
// Src must have been built before
VkAccelerationStructureObj bot_level_as_updated(*m_device, bot_level_as_create_info);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-update-02489");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_TRUE,
bot_level_as_updated.handle(), bot_level_as.handle(), bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
// Src must have been built before with the VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV flag
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBuildAccelerationStructureNV-update-02489");
vkCmdBuildAccelerationStructureNV(m_commandBuffer->handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_TRUE,
bot_level_as_updated.handle(), bot_level_as.handle(), bot_level_as_scratch.handle(), 0);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ValidateGetAccelerationStructureHandleNV) {
TEST_DESCRIPTION("Validate acceleration structure handle querying.");
if (!InitFrameworkForRayTracingTest(this, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV =
reinterpret_cast<PFN_vkGetAccelerationStructureHandleNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkGetAccelerationStructureHandleNV"));
assert(vkGetAccelerationStructureHandleNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV bot_level_as_create_info = {};
bot_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
bot_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
bot_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bot_level_as_create_info.info.instanceCount = 0;
bot_level_as_create_info.info.geometryCount = 1;
bot_level_as_create_info.info.pGeometries = &geometry;
// Not enough space for the handle
{
VkAccelerationStructureObj bot_level_as(*m_device, bot_level_as_create_info);
m_errorMonitor->VerifyNotFound();
uint64_t handle = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetAccelerationStructureHandleNV-dataSize-02240");
vkGetAccelerationStructureHandleNV(m_device->handle(), bot_level_as.handle(), sizeof(uint8_t), &handle);
m_errorMonitor->VerifyFound();
}
// No memory bound to acceleration structure
{
VkAccelerationStructureObj bot_level_as(*m_device, bot_level_as_create_info, /*init_memory=*/false);
m_errorMonitor->VerifyNotFound();
uint64_t handle = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX");
vkGetAccelerationStructureHandleNV(m_device->handle(), bot_level_as.handle(), sizeof(uint64_t), &handle);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ValidateCmdCopyAccelerationStructureNV) {
TEST_DESCRIPTION("Validate acceleration structure copying.");
if (!InitFrameworkForRayTracingTest(this, m_instance_extension_names, m_device_extension_names, m_errorMonitor)) {
return;
}
PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV = reinterpret_cast<PFN_vkCmdCopyAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCmdCopyAccelerationStructureNV"));
assert(vkCmdCopyAccelerationStructureNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV as_create_info = {};
as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
as_create_info.info.instanceCount = 0;
as_create_info.info.geometryCount = 1;
as_create_info.info.pGeometries = &geometry;
VkAccelerationStructureObj src_as(*m_device, as_create_info);
VkAccelerationStructureObj dst_as(*m_device, as_create_info);
VkAccelerationStructureObj dst_as_without_mem(*m_device, as_create_info, false);
m_errorMonitor->VerifyNotFound();
// Command buffer must be in recording state
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyAccelerationStructureNV-commandBuffer-recording");
vkCmdCopyAccelerationStructureNV(m_commandBuffer->handle(), dst_as.handle(), src_as.handle(),
VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV);
m_errorMonitor->VerifyFound();
m_commandBuffer->begin();
// Src must have been created with allow compaction flag
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdCopyAccelerationStructureNV-src-02497");
vkCmdCopyAccelerationStructureNV(m_commandBuffer->handle(), dst_as.handle(), src_as.handle(),
VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV);
m_errorMonitor->VerifyFound();
// Dst must have been bound with memory
m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
vkCmdCopyAccelerationStructureNV(m_commandBuffer->handle(), dst_as_without_mem.handle(), src_as.handle(),
VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, GpuBuildAccelerationStructureValidationInvalidHandle) {
TEST_DESCRIPTION(
"Acceleration structure gpu validation should report an invalid handle when trying to build a top level "
"acceleration structure with an invalid handle for a bottom level acceleration structure.");
if (!InitFrameworkForRayTracingTest(this, m_instance_extension_names, m_device_extension_names, m_errorMonitor,
/*need_gpu_validation=*/true)) {
return;
}
PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV =
reinterpret_cast<PFN_vkCmdBuildAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCmdBuildAccelerationStructureNV"));
assert(vkCmdBuildAccelerationStructureNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV top_level_as_create_info = {};
top_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
top_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
top_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
top_level_as_create_info.info.instanceCount = 1;
top_level_as_create_info.info.geometryCount = 0;
VkCommandPoolObj command_pool(m_device, 0, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
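// The NV ray tracing headers expose no instance struct; this mirrors the instance data layout described in the
// VK_NV_ray_tracing spec.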
struct VkGeometryInstanceNV {
float transform[12];
uint32_t instanceCustomIndex : 24;
uint32_t mask : 8;
uint32_t instanceOffset : 24;
uint32_t flags : 8;
uint64_t accelerationStructureHandle;
};
VkGeometryInstanceNV instance = {
{
// clang-format off
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
// clang-format on
},
0,
0xFF,
0,
VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV,
1234567890, // invalid
};
VkDeviceSize instance_buffer_size = sizeof(VkGeometryInstanceNV);
VkBufferObj instance_buffer;
instance_buffer.init(*m_device, instance_buffer_size,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
uint8_t *mapped_instance_buffer_data = (uint8_t *)instance_buffer.memory().map();
std::memcpy(mapped_instance_buffer_data, (uint8_t *)&instance, static_cast<std::size_t>(instance_buffer_size));
instance_buffer.memory().unmap();
VkAccelerationStructureObj top_level_as(*m_device, top_level_as_create_info);
m_errorMonitor->VerifyNotFound();
VkBufferObj top_level_as_scratch;
top_level_as.create_scratch_buffer(*m_device, &top_level_as_scratch);
VkCommandBufferObj command_buffer(m_device, &command_pool);
command_buffer.begin();
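// Record a top level build that references the bogus bottom level handle through the instance buffer.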
vkCmdBuildAccelerationStructureNV(command_buffer.handle(), &top_level_as_create_info.info, instance_buffer.handle(), 0,
VK_FALSE, top_level_as.handle(), VK_NULL_HANDLE, top_level_as_scratch.handle(), 0);
command_buffer.end();
m_errorMonitor->SetDesiredFailureMsg(
kErrorBit, "Attempted to build top level acceleration structure using invalid bottom level acceleration structure handle");
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer.handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, GpuBuildAccelerationStructureValidationBottomLevelNotYetBuilt) {
TEST_DESCRIPTION(
"Acceleration structure gpu validation should report an invalid handle when trying to build a top level "
"acceleration structure with a handle for a bottom level acceleration structure that has not yet been built.");
if (!InitFrameworkForRayTracingTest(this, m_instance_extension_names, m_device_extension_names, m_errorMonitor,
/*need_gpu_validation=*/true)) {
return;
}
PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV =
reinterpret_cast<PFN_vkCmdBuildAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCmdBuildAccelerationStructureNV"));
assert(vkCmdBuildAccelerationStructureNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV bot_level_as_create_info = {};
bot_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
bot_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
bot_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bot_level_as_create_info.info.instanceCount = 0;
bot_level_as_create_info.info.geometryCount = 1;
bot_level_as_create_info.info.pGeometries = &geometry;
VkAccelerationStructureCreateInfoNV top_level_as_create_info = {};
top_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
top_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
top_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
top_level_as_create_info.info.instanceCount = 1;
top_level_as_create_info.info.geometryCount = 0;
VkCommandPoolObj command_pool(m_device, 0, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
struct VkGeometryInstanceNV {
float transform[12];
uint32_t instanceCustomIndex : 24;
uint32_t mask : 8;
uint32_t instanceOffset : 24;
uint32_t flags : 8;
uint64_t accelerationStructureHandle;
};
VkAccelerationStructureObj bot_level_as_never_built(*m_device, bot_level_as_create_info);
m_errorMonitor->VerifyNotFound();
VkGeometryInstanceNV instance = {
{
// clang-format off
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
// clang-format on
},
0,
0xFF,
0,
VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV,
bot_level_as_never_built.opaque_handle(),
};
VkDeviceSize instance_buffer_size = sizeof(VkGeometryInstanceNV);
VkBufferObj instance_buffer;
instance_buffer.init(*m_device, instance_buffer_size,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
uint8_t *mapped_instance_buffer_data = (uint8_t *)instance_buffer.memory().map();
std::memcpy(mapped_instance_buffer_data, (uint8_t *)&instance, static_cast<std::size_t>(instance_buffer_size));
instance_buffer.memory().unmap();
VkAccelerationStructureObj top_level_as(*m_device, top_level_as_create_info);
m_errorMonitor->VerifyNotFound();
VkBufferObj top_level_as_scratch;
top_level_as.create_scratch_buffer(*m_device, &top_level_as_scratch);
VkCommandBufferObj command_buffer(m_device, &command_pool);
command_buffer.begin();
vkCmdBuildAccelerationStructureNV(command_buffer.handle(), &top_level_as_create_info.info, instance_buffer.handle(), 0,
VK_FALSE, top_level_as.handle(), VK_NULL_HANDLE, top_level_as_scratch.handle(), 0);
command_buffer.end();
m_errorMonitor->SetDesiredFailureMsg(
kErrorBit, "Attempted to build top level acceleration structure using invalid bottom level acceleration structure handle");
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer.handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, GpuBuildAccelerationStructureValidationBottomLevelDestroyed) {
TEST_DESCRIPTION(
"Acceleration structure gpu validation should report an invalid handle when trying to build a top level "
"acceleration structure with a handle for a destroyed bottom level acceleration structure.");
if (!InitFrameworkForRayTracingTest(this, m_instance_extension_names, m_device_extension_names, m_errorMonitor,
/*need_gpu_validation=*/true)) {
return;
}
PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV =
reinterpret_cast<PFN_vkCmdBuildAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCmdBuildAccelerationStructureNV"));
assert(vkCmdBuildAccelerationStructureNV != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV bot_level_as_create_info = {};
bot_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
bot_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
bot_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV;
bot_level_as_create_info.info.instanceCount = 0;
bot_level_as_create_info.info.geometryCount = 1;
bot_level_as_create_info.info.pGeometries = &geometry;
VkAccelerationStructureCreateInfoNV top_level_as_create_info = {};
top_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
top_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
top_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
top_level_as_create_info.info.instanceCount = 1;
top_level_as_create_info.info.geometryCount = 0;
VkCommandPoolObj command_pool(m_device, 0, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
struct VkGeometryInstanceNV {
float transform[12];
uint32_t instanceCustomIndex : 24;
uint32_t mask : 8;
uint32_t instanceOffset : 24;
uint32_t flags : 8;
uint64_t accelerationStructureHandle;
};
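// Build a bottom level AS and capture its handle; the AS itself is destroyed when the scope below ends.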
uint64_t destroyed_bot_level_as_handle = 0;
{
VkAccelerationStructureObj destroyed_bot_level_as(*m_device, bot_level_as_create_info);
m_errorMonitor->VerifyNotFound();
destroyed_bot_level_as_handle = destroyed_bot_level_as.opaque_handle();
VkBufferObj bot_level_as_scratch;
destroyed_bot_level_as.create_scratch_buffer(*m_device, &bot_level_as_scratch);
VkCommandBufferObj command_buffer(m_device, &command_pool);
command_buffer.begin();
vkCmdBuildAccelerationStructureNV(command_buffer.handle(), &bot_level_as_create_info.info, VK_NULL_HANDLE, 0, VK_FALSE,
destroyed_bot_level_as.handle(), VK_NULL_HANDLE, bot_level_as_scratch.handle(), 0);
command_buffer.end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer.handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyNotFound();
// vk::DestroyAccelerationStructureNV called on destroyed_bot_level_as during destruction.
}
VkGeometryInstanceNV instance = {
{
// clang-format off
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
// clang-format on
},
0,
0xFF,
0,
VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV,
destroyed_bot_level_as_handle,
};
VkDeviceSize instance_buffer_size = sizeof(VkGeometryInstanceNV);
VkBufferObj instance_buffer;
instance_buffer.init(*m_device, instance_buffer_size,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
uint8_t *mapped_instance_buffer_data = (uint8_t *)instance_buffer.memory().map();
std::memcpy(mapped_instance_buffer_data, (uint8_t *)&instance, static_cast<std::size_t>(instance_buffer_size));
instance_buffer.memory().unmap();
VkAccelerationStructureObj top_level_as(*m_device, top_level_as_create_info);
m_errorMonitor->VerifyNotFound();
VkBufferObj top_level_as_scratch;
top_level_as.create_scratch_buffer(*m_device, &top_level_as_scratch);
VkCommandBufferObj command_buffer(m_device, &command_pool);
command_buffer.begin();
vkCmdBuildAccelerationStructureNV(command_buffer.handle(), &top_level_as_create_info.info, instance_buffer.handle(), 0,
VK_FALSE, top_level_as.handle(), VK_NULL_HANDLE, top_level_as_scratch.handle(), 0);
command_buffer.end();
m_errorMonitor->SetDesiredFailureMsg(
kErrorBit, "Attempted to build top level acceleration structure using invalid bottom level acceleration structure handle");
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer.handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, GpuBuildAccelerationStructureValidationRestoresState) {
TEST_DESCRIPTION("Validate that acceleration structure gpu validation correctly restores compute state.");
if (!InitFrameworkForRayTracingTest(this, m_instance_extension_names, m_device_extension_names, m_errorMonitor,
/*need_gpu_validation=*/true, /*need_push_descriptors=*/true)) {
return;
}
PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV =
reinterpret_cast<PFN_vkCmdBuildAccelerationStructureNV>(
vk::GetDeviceProcAddr(m_device->handle(), "vkCmdBuildAccelerationStructureNV"));
assert(vkCmdBuildAccelerationStructureNV != nullptr);
PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR =
(PFN_vkCmdPushDescriptorSetKHR)vk::GetDeviceProcAddr(m_device->handle(), "vkCmdPushDescriptorSetKHR");
assert(vkCmdPushDescriptorSetKHR != nullptr);
VkBufferObj vbo;
VkBufferObj ibo;
VkGeometryNV geometry;
GetSimpleGeometryForAccelerationStructureTests(*m_device, &vbo, &ibo, &geometry);
VkAccelerationStructureCreateInfoNV top_level_as_create_info = {};
top_level_as_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV;
top_level_as_create_info.info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV;
top_level_as_create_info.info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV;
top_level_as_create_info.info.instanceCount = 1;
top_level_as_create_info.info.geometryCount = 0;
VkCommandPoolObj command_pool(m_device, 0, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
struct VkGeometryInstanceNV {
float transform[12];
uint32_t instanceCustomIndex : 24;
uint32_t mask : 8;
uint32_t instanceOffset : 24;
uint32_t flags : 8;
uint64_t accelerationStructureHandle;
};
VkGeometryInstanceNV instance = {
{
// clang-format off
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
// clang-format on
},
0,
0xFF,
0,
VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV,
1234567,  // deliberately invalid bottom level acceleration structure handle
};
VkDeviceSize instance_buffer_size = sizeof(VkGeometryInstanceNV);
VkBufferObj instance_buffer;
instance_buffer.init(*m_device, instance_buffer_size,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_RAY_TRACING_BIT_NV);
uint8_t *mapped_instance_buffer_data = (uint8_t *)instance_buffer.memory().map();
std::memcpy(mapped_instance_buffer_data, (uint8_t *)&instance, static_cast<std::size_t>(instance_buffer_size));
instance_buffer.memory().unmap();
VkAccelerationStructureObj top_level_as(*m_device, top_level_as_create_info);
m_errorMonitor->VerifyNotFound();
VkBufferObj top_level_as_scratch;
top_level_as.create_scratch_buffer(*m_device, &top_level_as_scratch);
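// Values written back by the compute shader so the test can verify that push constants and descriptors are restored
// after the GPU-assisted build validation runs.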
struct ComputeOutput {
uint32_t push_constant_value;
uint32_t push_descriptor_value;
uint32_t normal_descriptor_value;
};
VkBufferObj push_descriptor_buffer;
push_descriptor_buffer.init(*m_device, 4, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
VkBufferObj normal_descriptor_buffer;
normal_descriptor_buffer.init(*m_device, 4, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
VkDeviceSize output_descriptor_buffer_size = static_cast<VkDeviceSize>(sizeof(ComputeOutput));
VkBufferObj output_descriptor_buffer;
output_descriptor_buffer.init(*m_device, output_descriptor_buffer_size,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
const std::string cs_source = R"glsl(#version 450
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
layout(push_constant) uniform PushConstants { uint value; } push_constant;
layout(set = 0, binding = 0, std430) buffer PushDescriptorBuffer { uint value; } push_descriptor;
layout(set = 1, binding = 0, std430) buffer NormalDescriptorBuffer { uint value; } normal_descriptor;
layout(set = 2, binding = 0, std430) buffer ComputeOutputBuffer {
uint push_constant_value;
uint push_descriptor_value;
uint normal_descriptor_value;
} compute_output;
void main() {
compute_output.push_constant_value = push_constant.value;
compute_output.push_descriptor_value = push_descriptor.value;
compute_output.normal_descriptor_value = normal_descriptor.value;
}
)glsl";
VkShaderObj cs(m_device, cs_source.c_str(), VK_SHADER_STAGE_COMPUTE_BIT, this);
OneOffDescriptorSet push_descriptor_set(m_device,
{
{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
},
VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
OneOffDescriptorSet normal_descriptor_set(m_device,
{
{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
});
OneOffDescriptorSet output_descriptor_set(m_device,
{
{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
});
VkPushConstantRange push_constant_range = {};
push_constant_range.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
push_constant_range.size = 4;
push_constant_range.offset = 0;
const VkPipelineLayoutObj compute_pipeline_layout(m_device,
{
&push_descriptor_set.layout_,
&normal_descriptor_set.layout_,
&output_descriptor_set.layout_,
},
{push_constant_range});
VkComputePipelineCreateInfo compute_pipeline_ci = {};
compute_pipeline_ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
compute_pipeline_ci.layout = compute_pipeline_layout.handle();
compute_pipeline_ci.stage = cs.GetStageCreateInfo();
VkPipeline compute_pipeline;
ASSERT_VK_SUCCESS(
vk::CreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &compute_pipeline_ci, nullptr, &compute_pipeline));
normal_descriptor_set.WriteDescriptorBufferInfo(0, normal_descriptor_buffer.handle(), 4, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
normal_descriptor_set.UpdateDescriptorSets();
output_descriptor_set.WriteDescriptorBufferInfo(0, output_descriptor_buffer.handle(), output_descriptor_buffer_size,
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
output_descriptor_set.UpdateDescriptorSets();
// Set input data
const uint32_t push_constant_value = 1234567890;
const uint32_t push_descriptor_value = 98765432;
const uint32_t normal_descriptor_value = 1111111;
uint32_t *mapped_push_descriptor_buffer_data = (uint32_t *)push_descriptor_buffer.memory().map();
*mapped_push_descriptor_buffer_data = push_descriptor_value;
push_descriptor_buffer.memory().unmap();
uint32_t *mapped_normal_descriptor_buffer_data = (uint32_t *)normal_descriptor_buffer.memory().map();
*mapped_normal_descriptor_buffer_data = normal_descriptor_value;
normal_descriptor_buffer.memory().unmap();
ComputeOutput *mapped_output_buffer_data = (ComputeOutput *)output_descriptor_buffer.memory().map();
mapped_output_buffer_data->push_constant_value = 0;
mapped_output_buffer_data->push_descriptor_value = 0;
mapped_output_buffer_data->normal_descriptor_value = 0;
output_descriptor_buffer.memory().unmap();
VkDescriptorBufferInfo push_descriptor_buffer_info = {};
push_descriptor_buffer_info.buffer = push_descriptor_buffer.handle();
push_descriptor_buffer_info.offset = 0;
push_descriptor_buffer_info.range = 4;
VkWriteDescriptorSet push_descriptor_set_write = {};
push_descriptor_set_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
push_descriptor_set_write.descriptorCount = 1;
push_descriptor_set_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
push_descriptor_set_write.dstBinding = 0;
push_descriptor_set_write.pBufferInfo = &push_descriptor_buffer_info;
VkCommandBufferObj command_buffer(m_device, &command_pool);
command_buffer.begin();
vk::CmdBindPipeline(command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline);
vk::CmdPushConstants(command_buffer.handle(), compute_pipeline_layout.handle(), VK_SHADER_STAGE_COMPUTE_BIT, 0, 4,
&push_constant_value);
vkCmdPushDescriptorSetKHR(command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline_layout.handle(), 0, 1,
&push_descriptor_set_write);
vk::CmdBindDescriptorSets(command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline_layout.handle(), 1, 1,
&normal_descriptor_set.set_, 0, nullptr);
vk::CmdBindDescriptorSets(command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline_layout.handle(), 2, 1,
&output_descriptor_set.set_, 0, nullptr);
vkCmdBuildAccelerationStructureNV(command_buffer.handle(), &top_level_as_create_info.info, instance_buffer.handle(), 0,
VK_FALSE, top_level_as.handle(), VK_NULL_HANDLE, top_level_as_scratch.handle(), 0);
vk::CmdDispatch(command_buffer.handle(), 1, 1, 1);
command_buffer.end();
m_errorMonitor->SetDesiredFailureMsg(
kErrorBit, "Attempted to build top level acceleration structure using invalid bottom level acceleration structure handle");
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer.handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyFound();
mapped_output_buffer_data = (ComputeOutput *)output_descriptor_buffer.memory().map();
EXPECT_EQ(mapped_output_buffer_data->push_constant_value, push_constant_value);
EXPECT_EQ(mapped_output_buffer_data->push_descriptor_value, push_descriptor_value);
EXPECT_EQ(mapped_output_buffer_data->normal_descriptor_value, normal_descriptor_value);
output_descriptor_buffer.memory().unmap();
// Clean up
vk::DestroyPipeline(m_device->device(), compute_pipeline, nullptr);
}
TEST_F(VkLayerTest, QueryPerformanceCreation) {
TEST_DESCRIPTION("Create performance query without support");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto performance_features = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>();
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performance_features);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performance_features.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performance_features));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
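// Pick the first queue family that reports any performance counters.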
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
break;
}
if (counters.empty()) {
printf("%s No queue reported any performance counter.\n", kSkipPrefix);
return;
}
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counters.size();
std::vector<uint32_t> counterIndices;
for (uint32_t c = 0; c < counters.size(); c++) counterIndices.push_back(c);
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
// Missing pNext
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkQueryPoolCreateInfo-queryType-03222");
VkQueryPool query_pool;
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_errorMonitor->VerifyFound();
query_pool_ci.pNext = &perf_query_pool_ci;
// Invalid counter indices
counterIndices.push_back(counters.size());
perf_query_pool_ci.counterIndexCount++;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkQueryPoolPerformanceCreateInfoKHR-pCounterIndices-03321");
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_errorMonitor->VerifyFound();
perf_query_pool_ci.counterIndexCount--;
counterIndices.pop_back();
// Success
m_errorMonitor->ExpectSuccess(kErrorBit);
vk::CreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_errorMonitor->VerifyNotFound();
m_commandBuffer->begin();
// Missing acquire lock
{
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryPool-03223");
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
m_errorMonitor->VerifyFound();
}
m_commandBuffer->end();
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueryPerformanceCounterCommandbufferScope) {
TEST_DESCRIPTION("Insert a performance query begin/end with respect to the command buffer counter scope");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto performanceFeatures = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>();
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performanceFeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performanceFeatures.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performanceFeatures, pool_flags));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
std::vector<uint32_t> counterIndices;
// Find a single counter with VK_QUERY_SCOPE_COMMAND_BUFFER_KHR scope.
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
for (uint32_t counterIdx = 0; counterIdx < counters.size(); counterIdx++) {
if (counters[counterIdx].scope == VK_QUERY_SCOPE_COMMAND_BUFFER_KHR) {
counterIndices.push_back(counterIdx);
break;
}
}
if (counterIndices.empty()) {
counters.clear();
continue;
}
break;
}
if (counterIndices.empty()) {
printf("%s No queue reported any performance counter with command buffer scope.\n", kSkipPrefix);
return;
}
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counterIndices.size();
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.pNext = &perf_query_pool_ci;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
VkQueryPool query_pool;
vk::CreateQueryPool(device(), &query_pool_ci, nullptr, &query_pool);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(device(), queueFamilyIndex, 0, &queue);
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR =
(PFN_vkAcquireProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkAcquireProfilingLockKHR");
ASSERT_TRUE(vkAcquireProfilingLockKHR != nullptr);
PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR =
(PFN_vkReleaseProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkReleaseProfilingLockKHR");
ASSERT_TRUE(vkReleaseProfilingLockKHR != nullptr);
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
// Not the first command.
{
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
m_commandBuffer->begin();
vk::CmdFillBuffer(m_commandBuffer->handle(), buffer, 0, 4096, 0);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryPool-03224");
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(queue);
vk::DestroyBuffer(device(), buffer, nullptr);
vk::FreeMemory(device(), mem, NULL);
}
// First command: success.
{
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
m_commandBuffer->begin();
m_errorMonitor->ExpectSuccess(kErrorBit);
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
m_errorMonitor->VerifyNotFound();
vk::CmdFillBuffer(m_commandBuffer->handle(), buffer, 0, 4096, 0);
vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 0);
m_commandBuffer->end();
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(queue);
vk::DestroyBuffer(device(), buffer, nullptr);
vk::FreeMemory(device(), mem, NULL);
}
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
vkReleaseProfilingLockKHR(device());
}
TEST_F(VkLayerTest, QueryPerformanceCounterRenderPassScope) {
TEST_DESCRIPTION("Insert a performance query begin/end with respect to the render pass counter scope");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto performanceFeatures = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>();
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performanceFeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performanceFeatures.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performanceFeatures, pool_flags));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
std::vector<uint32_t> counterIndices;
// Find a single counter with VK_QUERY_SCOPE_RENDER_PASS_KHR scope.
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
for (uint32_t counterIdx = 0; counterIdx < counters.size(); counterIdx++) {
if (counters[counterIdx].scope == VK_QUERY_SCOPE_RENDER_PASS_KHR) {
counterIndices.push_back(counterIdx);
break;
}
}
if (counterIndices.empty()) {
counters.clear();
continue;
}
break;
}
if (counterIndices.empty()) {
printf("%s No queue reported any performance counter with render pass scope.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counterIndices.size();
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.pNext = &perf_query_pool_ci;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
VkQueryPool query_pool;
vk::CreateQueryPool(device(), &query_pool_ci, nullptr, &query_pool);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(device(), queueFamilyIndex, 0, &queue);
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR =
(PFN_vkAcquireProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkAcquireProfilingLockKHR");
ASSERT_TRUE(vkAcquireProfilingLockKHR != nullptr);
PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR =
(PFN_vkReleaseProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkReleaseProfilingLockKHR");
ASSERT_TRUE(vkReleaseProfilingLockKHR != nullptr);
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
// Inside a render pass.
{
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBeginQuery-queryPool-03225");
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(queue);
}
vkReleaseProfilingLockKHR(device());
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueryPerformanceReleaseProfileLockBeforeSubmit) {
TEST_DESCRIPTION("Verify that we get an error if we release the profiling lock during the recording of performance queries");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto performanceFeatures = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>();
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performanceFeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performanceFeatures.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performanceFeatures, pool_flags));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
std::vector<uint32_t> counterIndices;
// Pick a single counter from the first queue family that reports any performance counters.
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
for (uint32_t counterIdx = 0; counterIdx < counters.size(); counterIdx++) {
counterIndices.push_back(counterIdx);
break;
}
if (counterIndices.empty()) {
counters.clear();
continue;
}
break;
}
if (counterIndices.empty()) {
printf("%s No queue reported any performance counter with render pass scope.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counterIndices.size();
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.pNext = &perf_query_pool_ci;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
VkQueryPool query_pool;
vk::CreateQueryPool(device(), &query_pool_ci, nullptr, &query_pool);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(device(), queueFamilyIndex, 0, &queue);
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR =
(PFN_vkAcquireProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkAcquireProfilingLockKHR");
ASSERT_TRUE(vkAcquireProfilingLockKHR != nullptr);
PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR =
(PFN_vkReleaseProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkReleaseProfilingLockKHR");
ASSERT_TRUE(vkReleaseProfilingLockKHR != nullptr);
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
{
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
m_commandBuffer->begin();
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
// Release while recording.
vkReleaseProfilingLockKHR(device());
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 0);
m_commandBuffer->end();
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkQueueSubmit-pCommandBuffers-03220");
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::QueueWaitIdle(queue);
vk::DestroyBuffer(device(), buffer, nullptr);
vk::FreeMemory(device(), mem, NULL);
}
vkReleaseProfilingLockKHR(device());
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueryPerformanceIncompletePasses) {
TEST_DESCRIPTION("Verify that we get an error if we don't submit a command buffer for each passes before getting the results.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME);
return;
}
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkPhysicalDeviceFeatures2KHR features2 = {};
auto hostQueryResetFeatures = lvl_init_struct<VkPhysicalDeviceHostQueryResetFeaturesEXT>();
auto performanceFeatures = lvl_init_struct<VkPhysicalDevicePerformanceQueryFeaturesKHR>(&hostQueryResetFeatures);
features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&performanceFeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!performanceFeatures.performanceCounterQueryPools) {
printf("%s Performance query pools are not supported.\n", kSkipPrefix);
return;
}
if (!hostQueryResetFeatures.hostQueryReset) {
printf("%s Missing host query reset.\n", kSkipPrefix);
return;
}
VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &performanceFeatures, pool_flags));
PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR =
(PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)vk::GetInstanceProcAddr(
instance(), "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
ASSERT_TRUE(vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR != nullptr);
PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR =
(PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)vk::GetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR");
ASSERT_TRUE(vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR != nullptr);
auto queueFamilyProperties = m_device->phy().queue_properties();
uint32_t queueFamilyIndex = queueFamilyProperties.size();
std::vector<VkPerformanceCounterKHR> counters;
std::vector<uint32_t> counterIndices;
uint32_t nPasses = 0;
    // Find a queue family whose set of performance counters requires more than one pass.
for (uint32_t idx = 0; idx < queueFamilyProperties.size(); idx++) {
uint32_t nCounters;
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, nullptr, nullptr);
if (nCounters == 0) continue;
counters.resize(nCounters);
for (auto &c : counters) {
c.sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
c.pNext = nullptr;
}
vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(gpu(), idx, &nCounters, &counters[0], nullptr);
queueFamilyIndex = idx;
for (uint32_t counterIdx = 0; counterIdx < counters.size(); counterIdx++) counterIndices.push_back(counterIdx);
VkQueryPoolPerformanceCreateInfoKHR create_info{};
create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
create_info.queueFamilyIndex = idx;
create_info.counterIndexCount = counterIndices.size();
create_info.pCounterIndices = &counterIndices[0];
vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(gpu(), &create_info, &nPasses);
if (nPasses < 2) {
counters.clear();
continue;
}
break;
}
if (counterIndices.empty()) {
printf("%s No queue reported a set of counters that needs more than one pass.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPoolPerformanceCreateInfoKHR perf_query_pool_ci{};
perf_query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR;
perf_query_pool_ci.queueFamilyIndex = queueFamilyIndex;
perf_query_pool_ci.counterIndexCount = counterIndices.size();
perf_query_pool_ci.pCounterIndices = &counterIndices[0];
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.pNext = &perf_query_pool_ci;
query_pool_ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR;
query_pool_ci.queryCount = 1;
VkQueryPool query_pool;
vk::CreateQueryPool(device(), &query_pool_ci, nullptr, &query_pool);
VkQueue queue = VK_NULL_HANDLE;
vk::GetDeviceQueue(device(), queueFamilyIndex, 0, &queue);
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR =
(PFN_vkAcquireProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkAcquireProfilingLockKHR");
ASSERT_TRUE(vkAcquireProfilingLockKHR != nullptr);
PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR =
(PFN_vkReleaseProfilingLockKHR)vk::GetInstanceProcAddr(instance(), "vkReleaseProfilingLockKHR");
ASSERT_TRUE(vkReleaseProfilingLockKHR != nullptr);
PFN_vkResetQueryPoolEXT fpvkResetQueryPoolEXT =
(PFN_vkResetQueryPoolEXT)vk::GetInstanceProcAddr(instance(), "vkResetQueryPoolEXT");
{
VkAcquireProfilingLockInfoKHR lock_info{};
lock_info.sType = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR;
VkResult result = vkAcquireProfilingLockKHR(device(), &lock_info);
ASSERT_TRUE(result == VK_SUCCESS);
}
{
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 4096;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
VkResult err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements mem_reqs;
vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 4096;
VkDeviceMemory mem;
err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
vk::BindBufferMemory(device(), buffer, mem, 0);
VkCommandBufferBeginInfo command_buffer_begin_info{};
command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
fpvkResetQueryPoolEXT(m_device->device(), query_pool, 0, 0);
m_commandBuffer->begin(&command_buffer_begin_info);
vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
vk::CmdFillBuffer(m_commandBuffer->handle(), buffer, 0, 4096, 0);
vk::CmdEndQuery(m_commandBuffer->handle(), query_pool, 0);
m_commandBuffer->end();
// Invalid pass index
{
VkPerformanceQuerySubmitInfoKHR perf_submit_info{};
perf_submit_info.sType = VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR;
perf_submit_info.counterPassIndex = nPasses;
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &perf_submit_info;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPerformanceQuerySubmitInfoKHR-counterPassIndex-03221");
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
// Leave the last pass out.
for (uint32_t passIdx = 0; passIdx < (nPasses - 1); passIdx++) {
VkPerformanceQuerySubmitInfoKHR perf_submit_info{};
perf_submit_info.sType = VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR;
perf_submit_info.counterPassIndex = passIdx;
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &perf_submit_info;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
vk::QueueWaitIdle(queue);
std::vector<VkPerformanceCounterResultKHR> results;
results.resize(counterIndices.size());
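        // Only (nPasses - 1) passes have been submitted so far, so requesting results now should
        // trigger VUID-vkGetQueryPoolResults-queryType-03231.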
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-03231");
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR), VK_QUERY_RESULT_WAIT_BIT);
m_errorMonitor->VerifyFound();
{
VkPerformanceQuerySubmitInfoKHR perf_submit_info{};
perf_submit_info.sType = VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR;
perf_submit_info.counterPassIndex = nPasses - 1;
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = &perf_submit_info;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vk::QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
vk::QueueWaitIdle(queue);
// Invalid stride
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-03229");
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR) + 4, VK_QUERY_RESULT_WAIT_BIT);
m_errorMonitor->VerifyFound();
// Invalid flags
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-03230");
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR), VK_QUERY_RESULT_WITH_AVAILABILITY_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-03230");
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR), VK_QUERY_RESULT_PARTIAL_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetQueryPoolResults-queryType-03230");
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR), VK_QUERY_RESULT_64_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->ExpectSuccess(kErrorBit);
vk::GetQueryPoolResults(device(), query_pool, 0, 1, sizeof(VkPerformanceCounterResultKHR) * results.size(), &results[0],
sizeof(VkPerformanceCounterResultKHR), VK_QUERY_RESULT_WAIT_BIT);
m_errorMonitor->VerifyNotFound();
vk::DestroyBuffer(device(), buffer, nullptr);
vk::FreeMemory(device(), mem, NULL);
}
vkReleaseProfilingLockKHR(device());
vk::DestroyQueryPool(m_device->device(), query_pool, NULL);
}
TEST_F(VkLayerTest, QueueSubmitNoTimelineSemaphoreInfo) {
TEST_DESCRIPTION("Submit a queue with a timeline semaphore but not a VkTimelineSemaphoreSubmitInfoKHR.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
auto timelinefeatures = lvl_init_struct<VkPhysicalDeviceTimelineSemaphoreFeaturesKHR>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&timelinefeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!timelinefeatures.timelineSemaphore) {
printf("%s Timeline semaphores are not supported.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkPipelineStageFlags stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
VkSubmitInfo submit_info[2] = {};
submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[0].commandBufferCount = 0;
submit_info[0].pWaitDstStageMask = &stageFlags;
submit_info[0].signalSemaphoreCount = 1;
submit_info[0].pSignalSemaphores = &semaphore;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pWaitSemaphores-03239");
vk::QueueSubmit(m_device->m_queue, 1, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
VkTimelineSemaphoreSubmitInfoKHR timeline_semaphore_submit_info{};
uint64_t signalValue = 1;
timeline_semaphore_submit_info.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR;
timeline_semaphore_submit_info.signalSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pSignalSemaphoreValues = &signalValue;
submit_info[0].pNext = &timeline_semaphore_submit_info;
submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[1].commandBufferCount = 0;
submit_info[1].pWaitDstStageMask = &stageFlags;
submit_info[1].waitSemaphoreCount = 1;
submit_info[1].pWaitSemaphores = &semaphore;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pWaitSemaphores-03239");
vk::QueueSubmit(m_device->m_queue, 2, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
}
TEST_F(VkLayerTest, QueueSubmitTimelineSemaphoreBadValue) {
TEST_DESCRIPTION("Submit a queue with a timeline semaphore using a wrong payload value.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME);
return;
}
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
auto timelinefeatures = lvl_init_struct<VkPhysicalDeviceTimelineSemaphoreFeaturesKHR>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&timelinefeatures);
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
if (!timelinefeatures.timelineSemaphore) {
printf("%s Timeline semaphores are not supported.\n", kSkipPrefix);
return;
}
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
(PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
auto timelineproperties = lvl_init_struct<VkPhysicalDeviceTimelineSemaphorePropertiesKHR>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&timelineproperties);
vkGetPhysicalDeviceProperties2KHR(gpu(), &prop2);
ASSERT_NO_FATAL_FAILURE(InitState());
VkSemaphoreTypeCreateInfoKHR semaphore_type_create_info{};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkTimelineSemaphoreSubmitInfoKHR timeline_semaphore_submit_info = {};
uint64_t signalValue = 1;
uint64_t waitValue = 3;
timeline_semaphore_submit_info.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR;
timeline_semaphore_submit_info.signalSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pSignalSemaphoreValues = &signalValue;
timeline_semaphore_submit_info.waitSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pWaitSemaphoreValues = &waitValue;
VkPipelineStageFlags stageFlags = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
VkSubmitInfo submit_info[2] = {};
submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[0].pNext = &timeline_semaphore_submit_info;
submit_info[0].pWaitDstStageMask = &stageFlags;
submit_info[0].signalSemaphoreCount = 1;
submit_info[0].pSignalSemaphores = &semaphore;
submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[1].pNext = &timeline_semaphore_submit_info;
submit_info[1].pWaitDstStageMask = &stageFlags;
submit_info[1].waitSemaphoreCount = 1;
submit_info[1].pWaitSemaphores = &semaphore;
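    // A timeline signal semaphore without a corresponding signal value must be rejected (03241),
    // and a timeline wait semaphore without a wait value must be rejected (03240).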
timeline_semaphore_submit_info.signalSemaphoreValueCount = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pNext-03241");
vk::QueueSubmit(m_device->m_queue, 1, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
timeline_semaphore_submit_info.signalSemaphoreValueCount = 1;
timeline_semaphore_submit_info.waitSemaphoreValueCount = 0;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pNext-03240");
vk::QueueSubmit(m_device->m_queue, 2, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
timeline_semaphore_submit_info.waitSemaphoreValueCount = 1;
semaphore_type_create_info.initialValue = 5;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pSignalSemaphores-03242");
vk::QueueSubmit(m_device->m_queue, 1, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
// Check if we can test violations of maxTimelineSemaphoreValueDifference
if (timelineproperties.maxTimelineSemaphoreValueDifference < UINT64_MAX) {
semaphore_type_create_info.initialValue = 0;
ASSERT_VK_SUCCESS(vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
signalValue = timelineproperties.maxTimelineSemaphoreValueDifference + 1;
timeline_semaphore_submit_info.pSignalSemaphoreValues = &signalValue;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pSignalSemaphores-03244");
vk::QueueSubmit(m_device->m_queue, 1, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
if (signalValue < UINT64_MAX) {
waitValue = signalValue + 1;
signalValue = 1;
timeline_semaphore_submit_info.waitSemaphoreValueCount = 1;
timeline_semaphore_submit_info.pWaitSemaphoreValues = &waitValue;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubmitInfo-pWaitSemaphores-03243");
vk::QueueSubmit(m_device->m_queue, 2, submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
vk::DestroySemaphore(m_device->device(), semaphore, nullptr);
}
}
TEST_F(VkLayerTest, InvalidExternalSemaphore) {
TEST_DESCRIPTION("Import and export invalid external semaphores, no queue sumbits involved.");
#ifdef _WIN32
printf("%s Test doesn't currently support Win32 semaphore, skipping test\n", kSkipPrefix);
return;
#else
const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME;
// Check for external semaphore instance extensions
if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME);
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
// Check for external semaphore device extensions
if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
m_device_extension_names.push_back(extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
    // Create a semaphore for importing
VkSemaphoreCreateInfo semaphore_create_info = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO};
semaphore_create_info.pNext = nullptr;
semaphore_create_info.flags = 0;
VkSemaphore import_semaphore;
VkResult err = vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &import_semaphore);
ASSERT_VK_SUCCESS(err);
int fd = 0;
VkImportSemaphoreFdInfoKHR import_semaphore_fd_info = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR};
import_semaphore_fd_info.pNext = nullptr;
import_semaphore_fd_info.semaphore = import_semaphore;
import_semaphore_fd_info.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR;
import_semaphore_fd_info.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT;
import_semaphore_fd_info.fd = fd;
auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vk::GetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR");
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkImportSemaphoreFdInfoKHR-handleType-01143");
vkImportSemaphoreFdKHR(device(), &import_semaphore_fd_info);
m_errorMonitor->VerifyFound();
// Cleanup
vk::DestroySemaphore(device(), import_semaphore, nullptr);
#endif
}
| 1 | 12,764 | I've filed an internal spec issue to add these missing VUs. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -24,6 +24,7 @@ import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import java.io.IOException;
+import java.io.UncheckedIOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.thoughtworks.selenium.webdriven;
import com.google.common.io.Resources;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
public class JavascriptLibrary {
static final String PREFIX = "/" + JavascriptLibrary.class.getPackage()
.getName().replace(".", "/") + "/";
private final ConcurrentHashMap<String, String> scripts = new ConcurrentHashMap<>();
private static final String injectableSelenium =
"/com/thoughtworks/selenium/webdriven/injectableSelenium.js";
private static final String htmlUtils =
"/com/thoughtworks/selenium/webdriven/htmlutils.js";
/**
* Loads the named Selenium script and returns it wrapped in an anonymous function.
*
* @param name The script to load.
* @return The loaded script wrapped in an anonymous function.
*/
public String getSeleniumScript(String name) {
String rawFunction = readScript(PREFIX + name);
return String.format("function() { return (%s).apply(null, arguments);}",
rawFunction);
}
public void callEmbeddedSelenium(WebDriver driver, String functionName,
WebElement element, Object... values) {
List<Object> args = new ArrayList<>();
args.add(element);
args.addAll(Arrays.asList(values));
String script = readScript(injectableSelenium) + "return browserbot." + functionName
+ ".apply(browserbot, arguments);";
((JavascriptExecutor) driver).executeScript(script, args.toArray());
}
public Object callEmbeddedHtmlUtils(WebDriver driver, String functionName, WebElement element,
Object... values) {
List<Object> args = new ArrayList<>();
args.add(element);
args.addAll(Arrays.asList(values));
String script = readScript(htmlUtils) + "return htmlutils." + functionName
+ ".apply(htmlutils, arguments);";
return ((JavascriptExecutor) driver).executeScript(script, args.toArray());
}
public Object executeScript(WebDriver driver, String script, Object... args) {
if (driver instanceof JavascriptExecutor) {
return ((JavascriptExecutor) driver).executeScript(script, args);
}
throw new UnsupportedOperationException(
"The underlying WebDriver instance does not support executing javascript");
}
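  // Script contents are cached by resource path, so each script is read from the classpath at most once.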
private String readScript(String script) {
return scripts.computeIfAbsent(script, this::readScriptImpl);
}
String readScriptImpl(String script) {
URL url = getClass().getResource(script);
if (url == null) {
throw new RuntimeException("Cannot locate " + script);
}
try {
return Resources.toString(url, StandardCharsets.UTF_8);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
| 1 | 19,388 | Can you please revert changes to files in the `thoughtworks` package? This is legacy code and we will eventually phase out RC. | SeleniumHQ-selenium | java |
@@ -24,16 +24,11 @@ import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.ZoneOffset;
+import java.util.Arrays;
import java.util.List;
import java.util.Map;
-import org.apache.flink.table.data.ArrayData;
-import org.apache.flink.table.data.DecimalData;
-import org.apache.flink.table.data.GenericRowData;
-import org.apache.flink.table.data.MapData;
-import org.apache.flink.table.data.RawValueData;
-import org.apache.flink.table.data.RowData;
-import org.apache.flink.table.data.StringData;
-import org.apache.flink.table.data.TimestampData;
+
+import org.apache.flink.table.data.*;
import org.apache.iceberg.MetadataColumns;
import org.apache.iceberg.Schema;
import org.apache.iceberg.parquet.ParquetValueReader; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink.data;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.ZoneOffset;
import java.util.List;
import java.util.Map;
import org.apache.flink.table.data.ArrayData;
import org.apache.flink.table.data.DecimalData;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.MapData;
import org.apache.flink.table.data.RawValueData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.iceberg.MetadataColumns;
import org.apache.iceberg.Schema;
import org.apache.iceberg.parquet.ParquetValueReader;
import org.apache.iceberg.parquet.ParquetValueReaders;
import org.apache.iceberg.parquet.TypeWithSchemaVisitor;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.ArrayUtil;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.LogicalTypeAnnotation.DecimalLogicalTypeAnnotation;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
public class FlinkParquetReaders {
private FlinkParquetReaders() {
}
public static ParquetValueReader<RowData> buildReader(Schema expectedSchema, MessageType fileSchema) {
return buildReader(expectedSchema, fileSchema, ImmutableMap.of());
}
@SuppressWarnings("unchecked")
public static ParquetValueReader<RowData> buildReader(Schema expectedSchema,
MessageType fileSchema,
Map<Integer, ?> idToConstant) {
return (ParquetValueReader<RowData>) TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
new ReadBuilder(fileSchema, idToConstant)
);
}
private static class ReadBuilder extends TypeWithSchemaVisitor<ParquetValueReader<?>> {
private final MessageType type;
private final Map<Integer, ?> idToConstant;
ReadBuilder(MessageType type, Map<Integer, ?> idToConstant) {
this.type = type;
this.idToConstant = idToConstant;
}
@Override
public ParquetValueReader<RowData> message(Types.StructType expected, MessageType message,
List<ParquetValueReader<?>> fieldReaders) {
return struct(expected, message.asGroupType(), fieldReaders);
}
@Override
public ParquetValueReader<RowData> struct(Types.StructType expected, GroupType struct,
List<ParquetValueReader<?>> fieldReaders) {
// match the expected struct's order
Map<Integer, ParquetValueReader<?>> readersById = Maps.newHashMap();
Map<Integer, Type> typesById = Maps.newHashMap();
List<Type> fields = struct.getFields();
for (int i = 0; i < fields.size(); i += 1) {
Type fieldType = fields.get(i);
if (fieldReaders.get(i) != null) {
int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
if (fieldType.getId() != null) {
int id = fieldType.getId().intValue();
readersById.put(id, ParquetValueReaders.option(fieldType, fieldD, fieldReaders.get(i)));
typesById.put(id, fieldType);
}
}
}
List<Types.NestedField> expectedFields = expected != null ?
expected.fields() : ImmutableList.of();
List<ParquetValueReader<?>> reorderedFields = Lists.newArrayListWithExpectedSize(
expectedFields.size());
List<Type> types = Lists.newArrayListWithExpectedSize(expectedFields.size());
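      // Build one reader per expected field: constants from idToConstant and the row-position /
      // is-deleted metadata columns are produced without reading the file; missing fields read as nulls.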
for (Types.NestedField field : expectedFields) {
int id = field.fieldId();
if (idToConstant.containsKey(id)) {
// containsKey is used because the constant may be null
reorderedFields.add(ParquetValueReaders.constant(idToConstant.get(id)));
types.add(null);
} else if (id == MetadataColumns.ROW_POSITION.fieldId()) {
reorderedFields.add(ParquetValueReaders.position());
types.add(null);
} else if (id == MetadataColumns.IS_DELETED.fieldId()) {
reorderedFields.add(ParquetValueReaders.constant(false));
types.add(null);
} else {
ParquetValueReader<?> reader = readersById.get(id);
if (reader != null) {
reorderedFields.add(reader);
types.add(typesById.get(id));
} else {
reorderedFields.add(ParquetValueReaders.nulls());
types.add(null);
}
}
}
return new RowDataReader(types, reorderedFields);
}
@Override
public ParquetValueReader<?> list(Types.ListType expectedList, GroupType array,
ParquetValueReader<?> elementReader) {
if (expectedList == null) {
return null;
}
GroupType repeated = array.getFields().get(0).asGroupType();
String[] repeatedPath = currentPath();
int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;
Type elementType = repeated.getType(0);
int elementD = type.getMaxDefinitionLevel(path(elementType.getName())) - 1;
return new ArrayReader<>(repeatedD, repeatedR, ParquetValueReaders.option(elementType, elementD, elementReader));
}
@Override
public ParquetValueReader<?> map(Types.MapType expectedMap, GroupType map,
ParquetValueReader<?> keyReader,
ParquetValueReader<?> valueReader) {
if (expectedMap == null) {
return null;
}
GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
String[] repeatedPath = currentPath();
int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;
Type keyType = repeatedKeyValue.getType(0);
int keyD = type.getMaxDefinitionLevel(path(keyType.getName())) - 1;
Type valueType = repeatedKeyValue.getType(1);
int valueD = type.getMaxDefinitionLevel(path(valueType.getName())) - 1;
return new MapReader<>(repeatedD, repeatedR,
ParquetValueReaders.option(keyType, keyD, keyReader),
ParquetValueReaders.option(valueType, valueD, valueReader));
}
@Override
@SuppressWarnings("CyclomaticComplexity")
public ParquetValueReader<?> primitive(org.apache.iceberg.types.Type.PrimitiveType expected,
PrimitiveType primitive) {
if (expected == null) {
return null;
}
ColumnDescriptor desc = type.getColumnDescription(currentPath());
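      // The logical (original) type annotation, when present, selects the reader; otherwise the
      // physical primitive type below decides.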
if (primitive.getOriginalType() != null) {
switch (primitive.getOriginalType()) {
case ENUM:
case JSON:
case UTF8:
return new StringReader(desc);
case INT_8:
case INT_16:
case INT_32:
if (expected.typeId() == Types.LongType.get().typeId()) {
return new ParquetValueReaders.IntAsLongReader(desc);
} else {
return new ParquetValueReaders.UnboxedReader<>(desc);
}
case TIME_MICROS:
return new LossyMicrosToMillisTimeReader(desc);
case TIME_MILLIS:
return new MillisTimeReader(desc);
case DATE:
case INT_64:
return new ParquetValueReaders.UnboxedReader<>(desc);
case TIMESTAMP_MICROS:
if (((Types.TimestampType) expected).shouldAdjustToUTC()) {
return new MicrosToTimestampTzReader(desc);
} else {
return new MicrosToTimestampReader(desc);
}
case TIMESTAMP_MILLIS:
if (((Types.TimestampType) expected).shouldAdjustToUTC()) {
return new MillisToTimestampTzReader(desc);
} else {
return new MillisToTimestampReader(desc);
}
case DECIMAL:
DecimalLogicalTypeAnnotation decimal = (DecimalLogicalTypeAnnotation) primitive.getLogicalTypeAnnotation();
switch (primitive.getPrimitiveTypeName()) {
case BINARY:
case FIXED_LEN_BYTE_ARRAY:
return new BinaryDecimalReader(desc, decimal.getPrecision(), decimal.getScale());
case INT64:
return new LongDecimalReader(desc, decimal.getPrecision(), decimal.getScale());
case INT32:
return new IntegerDecimalReader(desc, decimal.getPrecision(), decimal.getScale());
default:
throw new UnsupportedOperationException(
"Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
}
case BSON:
return new ParquetValueReaders.ByteArrayReader(desc);
default:
throw new UnsupportedOperationException(
"Unsupported logical type: " + primitive.getOriginalType());
}
}
switch (primitive.getPrimitiveTypeName()) {
case FIXED_LEN_BYTE_ARRAY:
case BINARY:
return new ParquetValueReaders.ByteArrayReader(desc);
case INT32:
if (expected.typeId() == org.apache.iceberg.types.Type.TypeID.LONG) {
return new ParquetValueReaders.IntAsLongReader(desc);
} else {
return new ParquetValueReaders.UnboxedReader<>(desc);
}
case FLOAT:
if (expected.typeId() == org.apache.iceberg.types.Type.TypeID.DOUBLE) {
return new ParquetValueReaders.FloatAsDoubleReader(desc);
} else {
return new ParquetValueReaders.UnboxedReader<>(desc);
}
case BOOLEAN:
case INT64:
case DOUBLE:
return new ParquetValueReaders.UnboxedReader<>(desc);
default:
throw new UnsupportedOperationException("Unsupported type: " + primitive);
}
}
}
private static class BinaryDecimalReader extends ParquetValueReaders.PrimitiveReader<DecimalData> {
private final int precision;
private final int scale;
BinaryDecimalReader(ColumnDescriptor desc, int precision, int scale) {
super(desc);
this.precision = precision;
this.scale = scale;
}
@Override
public DecimalData read(DecimalData ignored) {
Binary binary = column.nextBinary();
BigDecimal bigDecimal = new BigDecimal(new BigInteger(binary.getBytes()), scale);
// TODO: need a unit test to write-read-validate decimal via FlinkParquetWrite/Reader
return DecimalData.fromBigDecimal(bigDecimal, precision, scale);
}
}
private static class IntegerDecimalReader extends ParquetValueReaders.PrimitiveReader<DecimalData> {
private final int precision;
private final int scale;
IntegerDecimalReader(ColumnDescriptor desc, int precision, int scale) {
super(desc);
this.precision = precision;
this.scale = scale;
}
@Override
public DecimalData read(DecimalData ignored) {
return DecimalData.fromUnscaledLong(column.nextInteger(), precision, scale);
}
}
private static class LongDecimalReader extends ParquetValueReaders.PrimitiveReader<DecimalData> {
private final int precision;
private final int scale;
LongDecimalReader(ColumnDescriptor desc, int precision, int scale) {
super(desc);
this.precision = precision;
this.scale = scale;
}
@Override
public DecimalData read(DecimalData ignored) {
return DecimalData.fromUnscaledLong(column.nextLong(), precision, scale);
}
}
private static class MicrosToTimestampTzReader extends ParquetValueReaders.UnboxedReader<TimestampData> {
MicrosToTimestampTzReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public TimestampData read(TimestampData ignored) {
long value = readLong();
return TimestampData.fromLocalDateTime(Instant.ofEpochSecond(Math.floorDiv(value, 1000_000),
Math.floorMod(value, 1000_000) * 1000)
.atOffset(ZoneOffset.UTC)
.toLocalDateTime());
}
@Override
public long readLong() {
return column.nextLong();
}
}
private static class MicrosToTimestampReader extends ParquetValueReaders.UnboxedReader<TimestampData> {
MicrosToTimestampReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public TimestampData read(TimestampData ignored) {
long value = readLong();
return TimestampData.fromInstant(Instant.ofEpochSecond(Math.floorDiv(value, 1000_000),
Math.floorMod(value, 1000_000) * 1000));
}
@Override
public long readLong() {
return column.nextLong();
}
}
private static class MillisToTimestampReader extends ParquetValueReaders.UnboxedReader<TimestampData> {
MillisToTimestampReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public TimestampData read(TimestampData ignored) {
long millis = readLong();
return TimestampData.fromEpochMillis(millis);
}
@Override
public long readLong() {
return column.nextLong();
}
}
private static class MillisToTimestampTzReader extends ParquetValueReaders.UnboxedReader<TimestampData> {
MillisToTimestampTzReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public TimestampData read(TimestampData ignored) {
long millis = readLong();
return TimestampData.fromLocalDateTime(Instant.ofEpochMilli(millis)
.atOffset(ZoneOffset.UTC)
.toLocalDateTime());
}
@Override
public long readLong() {
return column.nextLong();
}
}
private static class StringReader extends ParquetValueReaders.PrimitiveReader<StringData> {
StringReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public StringData read(StringData ignored) {
Binary binary = column.nextBinary();
ByteBuffer buffer = binary.toByteBuffer();
if (buffer.hasArray()) {
return StringData.fromBytes(
buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
} else {
return StringData.fromBytes(binary.getBytes());
}
}
}
private static class LossyMicrosToMillisTimeReader extends ParquetValueReaders.PrimitiveReader<Integer> {
LossyMicrosToMillisTimeReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public Integer read(Integer reuse) {
// Discard microseconds since Flink uses millisecond unit for TIME type.
return (int) Math.floorDiv(column.nextLong(), 1000);
}
}
private static class MillisTimeReader extends ParquetValueReaders.PrimitiveReader<Integer> {
MillisTimeReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public Integer read(Integer reuse) {
return (int) column.nextLong();
}
}
private static class ArrayReader<E> extends ParquetValueReaders.RepeatedReader<ArrayData, ReusableArrayData, E> {
private int readPos = 0;
private int writePos = 0;
ArrayReader(int definitionLevel, int repetitionLevel, ParquetValueReader<E> reader) {
super(definitionLevel, repetitionLevel, reader);
}
@Override
protected ReusableArrayData newListData(ArrayData reuse) {
this.readPos = 0;
this.writePos = 0;
if (reuse instanceof ReusableArrayData) {
return (ReusableArrayData) reuse;
} else {
return new ReusableArrayData();
}
}
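    // readPos and writePos walk the reusable backing array: getElement hands back a previously
    // stored object for reuse, while addElement stores the freshly read value.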
@Override
@SuppressWarnings("unchecked")
protected E getElement(ReusableArrayData list) {
E value = null;
if (readPos < list.capacity()) {
value = (E) list.values[readPos];
}
readPos += 1;
return value;
}
@Override
protected void addElement(ReusableArrayData reused, E element) {
if (writePos >= reused.capacity()) {
reused.grow();
}
reused.values[writePos] = element;
writePos += 1;
}
@Override
protected ArrayData buildList(ReusableArrayData list) {
list.setNumElements(writePos);
return list;
}
}
private static class MapReader<K, V> extends
ParquetValueReaders.RepeatedKeyValueReader<MapData, ReusableMapData, K, V> {
private int readPos = 0;
private int writePos = 0;
private final ParquetValueReaders.ReusableEntry<K, V> entry = new ParquetValueReaders.ReusableEntry<>();
private final ParquetValueReaders.ReusableEntry<K, V> nullEntry = new ParquetValueReaders.ReusableEntry<>();
MapReader(int definitionLevel, int repetitionLevel,
ParquetValueReader<K> keyReader, ParquetValueReader<V> valueReader) {
super(definitionLevel, repetitionLevel, keyReader, valueReader);
}
@Override
protected ReusableMapData newMapData(MapData reuse) {
this.readPos = 0;
this.writePos = 0;
if (reuse instanceof ReusableMapData) {
return (ReusableMapData) reuse;
} else {
return new ReusableMapData();
}
}
@Override
@SuppressWarnings("unchecked")
protected Map.Entry<K, V> getPair(ReusableMapData map) {
Map.Entry<K, V> kv = nullEntry;
if (readPos < map.capacity()) {
entry.set((K) map.keys.values[readPos], (V) map.values.values[readPos]);
kv = entry;
}
readPos += 1;
return kv;
}
@Override
protected void addPair(ReusableMapData map, K key, V value) {
if (writePos >= map.capacity()) {
map.grow();
}
map.keys.values[writePos] = key;
map.values.values[writePos] = value;
writePos += 1;
}
@Override
protected MapData buildMap(ReusableMapData map) {
map.setNumElements(writePos);
return map;
}
}
private static class RowDataReader extends ParquetValueReaders.StructReader<RowData, GenericRowData> {
private final int numFields;
RowDataReader(List<Type> types, List<ParquetValueReader<?>> readers) {
super(types, readers);
this.numFields = readers.size();
}
@Override
protected GenericRowData newStructData(RowData reuse) {
if (reuse instanceof GenericRowData) {
return (GenericRowData) reuse;
} else {
return new GenericRowData(numFields);
}
}
@Override
protected Object getField(GenericRowData intermediate, int pos) {
return intermediate.getField(pos);
}
@Override
protected RowData buildStruct(GenericRowData struct) {
return struct;
}
@Override
protected void set(GenericRowData row, int pos, Object value) {
row.setField(pos, value);
}
@Override
protected void setNull(GenericRowData row, int pos) {
row.setField(pos, null);
}
@Override
protected void setBoolean(GenericRowData row, int pos, boolean value) {
row.setField(pos, value);
}
@Override
protected void setInteger(GenericRowData row, int pos, int value) {
row.setField(pos, value);
}
@Override
protected void setLong(GenericRowData row, int pos, long value) {
row.setField(pos, value);
}
@Override
protected void setFloat(GenericRowData row, int pos, float value) {
row.setField(pos, value);
}
@Override
protected void setDouble(GenericRowData row, int pos, double value) {
row.setField(pos, value);
}
}
private static class ReusableMapData implements MapData {
private final ReusableArrayData keys;
private final ReusableArrayData values;
private int numElements;
private ReusableMapData() {
this.keys = new ReusableArrayData();
this.values = new ReusableArrayData();
}
private void grow() {
keys.grow();
values.grow();
}
private int capacity() {
return keys.capacity();
}
public void setNumElements(int numElements) {
this.numElements = numElements;
keys.setNumElements(numElements);
values.setNumElements(numElements);
}
@Override
public int size() {
return numElements;
}
@Override
public ReusableArrayData keyArray() {
return keys;
}
@Override
public ReusableArrayData valueArray() {
return values;
}
}
private static class ReusableArrayData implements ArrayData {
private static final Object[] EMPTY = new Object[0];
private Object[] values = EMPTY;
private int numElements = 0;
private void grow() {
if (values.length == 0) {
this.values = new Object[20];
} else {
Object[] old = values;
this.values = new Object[old.length << 1];
// copy the old array in case it has values that can be reused
System.arraycopy(old, 0, values, 0, old.length);
}
}
private int capacity() {
return values.length;
}
public void setNumElements(int numElements) {
this.numElements = numElements;
}
@Override
public int size() {
return numElements;
}
@Override
public boolean isNullAt(int ordinal) {
return null == values[ordinal];
}
@Override
public boolean getBoolean(int ordinal) {
return (boolean) values[ordinal];
}
@Override
public byte getByte(int ordinal) {
return (byte) values[ordinal];
}
@Override
public short getShort(int ordinal) {
return (short) values[ordinal];
}
@Override
public int getInt(int ordinal) {
return (int) values[ordinal];
}
@Override
public long getLong(int ordinal) {
return (long) values[ordinal];
}
@Override
public float getFloat(int ordinal) {
return (float) values[ordinal];
}
@Override
public double getDouble(int ordinal) {
return (double) values[ordinal];
}
@Override
public StringData getString(int pos) {
return (StringData) values[pos];
}
@Override
public DecimalData getDecimal(int pos, int precision, int scale) {
return (DecimalData) values[pos];
}
@Override
public TimestampData getTimestamp(int pos, int precision) {
return (TimestampData) values[pos];
}
@SuppressWarnings("unchecked")
@Override
public <T> RawValueData<T> getRawValue(int pos) {
return (RawValueData<T>) values[pos];
}
@Override
public byte[] getBinary(int ordinal) {
return (byte[]) values[ordinal];
}
@Override
public ArrayData getArray(int ordinal) {
return (ArrayData) values[ordinal];
}
@Override
public MapData getMap(int ordinal) {
return (MapData) values[ordinal];
}
@Override
public RowData getRow(int pos, int numFields) {
return (RowData) values[pos];
}
@Override
public boolean[] toBooleanArray() {
return ArrayUtil.toPrimitive((Boolean[]) values);
}
@Override
public byte[] toByteArray() {
return ArrayUtil.toPrimitive((Byte[]) values);
}
@Override
public short[] toShortArray() {
return ArrayUtil.toPrimitive((Short[]) values);
}
@Override
public int[] toIntArray() {
return ArrayUtil.toPrimitive((Integer[]) values);
}
@Override
public long[] toLongArray() {
return ArrayUtil.toPrimitive((Long[]) values);
}
@Override
public float[] toFloatArray() {
return ArrayUtil.toPrimitive((Float[]) values);
}
@Override
public double[] toDoubleArray() {
return ArrayUtil.toPrimitive((Double[]) values);
}
}
}
| 1 | 41,702 | In iceberg, we usually don't use `*` to import package, it's more clear to import the specify package one by one. | apache-iceberg | java |
@@ -19,6 +19,8 @@ package org.openqa.grid.web;
import com.google.common.collect.Maps;
+import com.sun.org.glassfish.gmbal.ManagedObject;
+
import org.openqa.grid.internal.Registry;
import org.openqa.grid.internal.utils.GridHubConfiguration;
import org.openqa.grid.web.servlet.DisplayHelpServlet; | 1 | /*
Copyright 2011 Selenium committers
Copyright 2011 Software Freedom Conservancy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.grid.web;
import com.google.common.collect.Maps;
import org.openqa.grid.internal.Registry;
import org.openqa.grid.internal.utils.GridHubConfiguration;
import org.openqa.grid.web.servlet.DisplayHelpServlet;
import org.openqa.grid.web.servlet.DriverServlet;
import org.openqa.grid.web.servlet.Grid1HeartbeatServlet;
import org.openqa.grid.web.servlet.HubStatusServlet;
import org.openqa.grid.web.servlet.LifecycleServlet;
import org.openqa.grid.web.servlet.ProxyStatusServlet;
import org.openqa.grid.web.servlet.RegistrationServlet;
import org.openqa.grid.web.servlet.ResourceServlet;
import org.openqa.grid.web.servlet.TestSessionStatusServlet;
import org.openqa.grid.web.servlet.beta.ConsoleServlet;
import org.openqa.grid.web.utils.ExtraServletUtil;
import org.openqa.selenium.net.NetworkUtils;
import org.seleniumhq.jetty7.server.Server;
import org.seleniumhq.jetty7.server.bio.SocketConnector;
import org.seleniumhq.jetty7.servlet.ServletContextHandler;
import org.seleniumhq.jetty7.util.thread.QueuedThreadPool;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Map;
import java.util.logging.Logger;
import javax.servlet.Servlet;
/**
* Jetty server. Main entry point for everything about the grid. <p/> Except for unit tests, this
* should be a singleton.
*/
public class Hub {
private static final Logger log = Logger.getLogger(Hub.class.getName());
private final int port;
private final String host;
private final int maxThread;
private final boolean isHostRestricted;
private final Registry registry;
private final Map<String, Class<? extends Servlet>> extraServlet = Maps.newHashMap();
private Server server;
private void addServlet(String key, Class<? extends Servlet> s) {
extraServlet.put(key, s);
}
/**
   * Get the registry backing the hub state.
*
* @return The registry
*/
public Registry getRegistry() {
return registry;
}
public Hub(GridHubConfiguration config) {
registry = Registry.newInstance(this, config);
maxThread = config.getJettyMaxThreads();
if (config.getHost() != null) {
host = config.getHost();
isHostRestricted = true;
} else {
NetworkUtils utils = new NetworkUtils();
host = utils.getIp4NonLoopbackAddressOfThisMachine().getHostAddress();
isHostRestricted = false;
}
this.port = config.getPort();
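    // Register any user-supplied servlets under /grid/admin/<ServletSimpleName>/*.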
for (String s : config.getServlets()) {
Class<? extends Servlet> servletClass = ExtraServletUtil.createServlet(s);
if (servletClass != null) {
String path = "/grid/admin/" + servletClass.getSimpleName() + "/*";
log.info("binding " + servletClass.getCanonicalName() + " to " + path);
addServlet(path, servletClass);
}
}
initServer();
}
private void initServer() {
try {
server = new Server();
SocketConnector socketListener = new SocketConnector();
socketListener.setMaxIdleTime(60000);
if (isHostRestricted) {
socketListener.setHost(host);
}
socketListener.setPort(port);
socketListener.setLowResourcesMaxIdleTime(6000);
server.addConnector(socketListener);
ServletContextHandler root = new ServletContextHandler(ServletContextHandler.SESSIONS);
root.setContextPath("/");
server.setHandler(root);
root.setAttribute(Registry.KEY, registry);
root.addServlet(DisplayHelpServlet.class.getName(), "/*");
root.addServlet(ConsoleServlet.class.getName(), "/grid/console/*");
root.addServlet(ConsoleServlet.class.getName(), "/grid/beta/console/*");
root.addServlet(org.openqa.grid.web.servlet.ConsoleServlet.class.getName(), "/grid/old/console/*");
root.addServlet(RegistrationServlet.class.getName(), "/grid/register/*");
// TODO remove at some point. Here for backward compatibility of
// tests etc.
root.addServlet(DriverServlet.class.getName(), "/grid/driver/*");
root.addServlet(DriverServlet.class.getName(), "/wd/hub/*");
root.addServlet(DriverServlet.class.getName(), "/selenium-server/driver/*");
root.addServlet(ResourceServlet.class.getName(), "/grid/resources/*");
root.addServlet(ProxyStatusServlet.class.getName(), "/grid/api/proxy/*");
root.addServlet(HubStatusServlet.class.getName(), "/grid/api/hub/*");
root.addServlet(TestSessionStatusServlet.class.getName(), "/grid/api/testsession/*");
root.addServlet(LifecycleServlet.class.getName(), "/lifecycle-manager/*");
// Selenium Grid 1.0 compatibility routes for older nodes trying to
// work with the newer hub.
root.addServlet(RegistrationServlet.class.getName(), "/registration-manager/register/*");
root.addServlet(Grid1HeartbeatServlet.class.getName(), "/heartbeat");
// Load any additional servlets provided by the user.
for (Map.Entry<String, Class<? extends Servlet>> entry : extraServlet.entrySet()) {
root.addServlet(entry.getValue().getName(), entry.getKey());
}
} catch (Throwable e) {
throw new RuntimeException("Error initializing the hub" + e.getMessage(), e);
}
}
public int getPort() {
return port;
}
public String getHost() {
return host;
}
public void start() throws Exception {
initServer();
    if (maxThread > 0) {
QueuedThreadPool pool = new QueuedThreadPool();
pool.setMaxThreads(maxThread);
server.setThreadPool(pool);
}
server.start();
}
public void stop() throws Exception {
server.stop();
}
public URL getUrl() {
try {
return new URL("http://" + getHost() + ":" + getPort());
} catch (MalformedURLException e) {
throw new RuntimeException(e.getMessage());
}
}
public URL getRegistrationURL() {
String uri = "http://" + getHost() + ":" + getPort() + "/grid/register/";
try {
return new URL(uri);
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
}
}
| 1 | 11,533 | And again. The reason it's bad is that if someone uses a JDK not produced by Oracle they won't have this class. | SeleniumHQ-selenium | js |
@@ -18,7 +18,8 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure
// There's no reason to stop timing the write after the connection is closed.
var oneBufferSize = maxResponseBufferSize.Value;
var maxBufferedBytes = oneBufferSize < long.MaxValue / 2 ? oneBufferSize * 2 : long.MaxValue;
- timeoutControl.StartTimingWrite(minDataRate, maxBufferedBytes);
+ timeoutControl.BytesWritten(minDataRate, maxBufferedBytes);
+ timeoutControl.StartTimingWrite();
}
}
} | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure
{
public static class TimeoutControlExtensions
{
public static void StartDrainTimeout(this ITimeoutControl timeoutControl, MinDataRate minDataRate, long? maxResponseBufferSize)
{
// If maxResponseBufferSize has no value, there's no backpressure and we can't reasonably timeout draining.
if (minDataRate == null || maxResponseBufferSize == null)
{
return;
}
            // With full backpressure and a connection adapter there could be two pipes buffering.
// We already validate that the buffer size is positive.
// There's no reason to stop timing the write after the connection is closed.
var oneBufferSize = maxResponseBufferSize.Value;
var maxBufferedBytes = oneBufferSize < long.MaxValue / 2 ? oneBufferSize * 2 : long.MaxValue;
timeoutControl.StartTimingWrite(minDataRate, maxBufferedBytes);
}
}
}
| 1 | 17,044 | Since we now keep track of all bytes written, and extend the write timeout as needed, it's tempting to no longer add 2 times the max buffer size to the bytes written accounting for the connection drain timeout. As we've discussed before, this add several minutes to the timeout with the default 240 bytes/sec rate limit. I'm thinking instead this line changes to `timeoutControl.BytesWritten(minDataRate, 1);` to add a grace period to the drain if necessary. What do you think @Tratcher? | aspnet-KestrelHttpServer | .cs |
@@ -0,0 +1,18 @@
+const formPropsSet = new Set([
+ 'form',
+ 'formAction',
+ 'formEncType',
+ 'formMethod',
+ 'formNoValidate',
+ 'formTarget',
+]);
+
+export default function getFormProps(props) {
+ return Object.keys(props).reduce((prev, key) => {
+ if (formPropsSet.has(key)) {
+ // eslint-disable-next-line no-param-reassign
+ prev[key] = props[key];
+ }
+ return prev;
+ }, {});
+} | 1 | 1 | 12,952 | Good call. Makes we wonder if we should do this with the ARIA props. | salesforce-design-system-react | js |
|
@@ -24,9 +24,7 @@ import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.Charset;
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.SQLException;
+import java.sql.*;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Calendar; | 1 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.impl.cfg;
import static org.camunda.bpm.engine.impl.cmd.HistoryCleanupCmd.MAX_THREADS_NUMBER;
import static org.camunda.bpm.engine.impl.util.EnsureUtil.ensureNotNull;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.Charset;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import javax.naming.InitialContext;
import javax.sql.DataSource;
import org.apache.ibatis.builder.xml.XMLConfigBuilder;
import org.apache.ibatis.datasource.pooled.PooledDataSource;
import org.apache.ibatis.mapping.Environment;
import org.apache.ibatis.session.Configuration;
import org.apache.ibatis.session.ExecutorType;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.ibatis.session.defaults.DefaultSqlSessionFactory;
import org.apache.ibatis.transaction.TransactionFactory;
import org.apache.ibatis.transaction.jdbc.JdbcTransactionFactory;
import org.apache.ibatis.transaction.managed.ManagedTransactionFactory;
import org.camunda.bpm.dmn.engine.DmnEngine;
import org.camunda.bpm.dmn.engine.DmnEngineConfiguration;
import org.camunda.bpm.dmn.engine.impl.DefaultDmnEngineConfiguration;
import org.camunda.bpm.engine.ArtifactFactory;
import org.camunda.bpm.engine.AuthorizationService;
import org.camunda.bpm.engine.CaseService;
import org.camunda.bpm.engine.DecisionService;
import org.camunda.bpm.engine.ExternalTaskService;
import org.camunda.bpm.engine.FilterService;
import org.camunda.bpm.engine.FormService;
import org.camunda.bpm.engine.HistoryService;
import org.camunda.bpm.engine.IdentityService;
import org.camunda.bpm.engine.ManagementService;
import org.camunda.bpm.engine.ProcessEngine;
import org.camunda.bpm.engine.ProcessEngineConfiguration;
import org.camunda.bpm.engine.ProcessEngineException;
import org.camunda.bpm.engine.RepositoryService;
import org.camunda.bpm.engine.RuntimeService;
import org.camunda.bpm.engine.TaskService;
import org.camunda.bpm.engine.authorization.Groups;
import org.camunda.bpm.engine.authorization.Permission;
import org.camunda.bpm.engine.authorization.Permissions;
import org.camunda.bpm.engine.impl.AuthorizationServiceImpl;
import org.camunda.bpm.engine.impl.DecisionServiceImpl;
import org.camunda.bpm.engine.impl.DefaultArtifactFactory;
import org.camunda.bpm.engine.impl.ExternalTaskServiceImpl;
import org.camunda.bpm.engine.impl.FilterServiceImpl;
import org.camunda.bpm.engine.impl.FormServiceImpl;
import org.camunda.bpm.engine.impl.HistoryServiceImpl;
import org.camunda.bpm.engine.impl.IdentityServiceImpl;
import org.camunda.bpm.engine.impl.ManagementServiceImpl;
import org.camunda.bpm.engine.impl.ModificationBatchJobHandler;
import org.camunda.bpm.engine.impl.OptimizeService;
import org.camunda.bpm.engine.impl.PriorityProvider;
import org.camunda.bpm.engine.impl.ProcessEngineImpl;
import org.camunda.bpm.engine.impl.RepositoryServiceImpl;
import org.camunda.bpm.engine.impl.RestartProcessInstancesJobHandler;
import org.camunda.bpm.engine.impl.RuntimeServiceImpl;
import org.camunda.bpm.engine.impl.ServiceImpl;
import org.camunda.bpm.engine.impl.TaskServiceImpl;
import org.camunda.bpm.engine.impl.application.ProcessApplicationManager;
import org.camunda.bpm.engine.impl.batch.removaltime.BatchSetRemovalTimeJobHandler;
import org.camunda.bpm.engine.impl.batch.removaltime.DecisionSetRemovalTimeJobHandler;
import org.camunda.bpm.engine.impl.batch.removaltime.ProcessSetRemovalTimeJobHandler;
import org.camunda.bpm.engine.impl.batch.BatchJobHandler;
import org.camunda.bpm.engine.impl.batch.BatchMonitorJobHandler;
import org.camunda.bpm.engine.impl.batch.BatchSeedJobHandler;
import org.camunda.bpm.engine.impl.batch.deletion.DeleteHistoricProcessInstancesJobHandler;
import org.camunda.bpm.engine.impl.batch.deletion.DeleteProcessInstancesJobHandler;
import org.camunda.bpm.engine.impl.batch.externaltask.SetExternalTaskRetriesJobHandler;
import org.camunda.bpm.engine.impl.batch.job.SetJobRetriesJobHandler;
import org.camunda.bpm.engine.impl.batch.update.UpdateProcessInstancesSuspendStateJobHandler;
import org.camunda.bpm.engine.impl.bpmn.behavior.ExternalTaskActivityBehavior;
import org.camunda.bpm.engine.impl.bpmn.deployer.BpmnDeployer;
import org.camunda.bpm.engine.impl.bpmn.parser.BpmnParseListener;
import org.camunda.bpm.engine.impl.bpmn.parser.BpmnParser;
import org.camunda.bpm.engine.impl.bpmn.parser.DefaultFailedJobParseListener;
import org.camunda.bpm.engine.impl.calendar.BusinessCalendarManager;
import org.camunda.bpm.engine.impl.calendar.CycleBusinessCalendar;
import org.camunda.bpm.engine.impl.calendar.DueDateBusinessCalendar;
import org.camunda.bpm.engine.impl.calendar.DurationBusinessCalendar;
import org.camunda.bpm.engine.impl.calendar.MapBusinessCalendarManager;
import org.camunda.bpm.engine.impl.cfg.auth.AuthorizationCommandChecker;
import org.camunda.bpm.engine.impl.cfg.auth.DefaultAuthorizationProvider;
import org.camunda.bpm.engine.impl.cfg.auth.DefaultPermissionProvider;
import org.camunda.bpm.engine.impl.cfg.auth.PermissionProvider;
import org.camunda.bpm.engine.impl.cfg.auth.ResourceAuthorizationProvider;
import org.camunda.bpm.engine.impl.cfg.multitenancy.TenantCommandChecker;
import org.camunda.bpm.engine.impl.cfg.multitenancy.TenantIdProvider;
import org.camunda.bpm.engine.impl.cfg.standalone.StandaloneTransactionContextFactory;
import org.camunda.bpm.engine.impl.cmd.HistoryCleanupCmd;
import org.camunda.bpm.engine.impl.cmmn.CaseServiceImpl;
import org.camunda.bpm.engine.impl.cmmn.deployer.CmmnDeployer;
import org.camunda.bpm.engine.impl.cmmn.entity.repository.CaseDefinitionManager;
import org.camunda.bpm.engine.impl.cmmn.entity.runtime.CaseExecutionManager;
import org.camunda.bpm.engine.impl.cmmn.entity.runtime.CaseSentryPartManager;
import org.camunda.bpm.engine.impl.cmmn.handler.DefaultCmmnElementHandlerRegistry;
import org.camunda.bpm.engine.impl.cmmn.transformer.CmmnTransformFactory;
import org.camunda.bpm.engine.impl.cmmn.transformer.CmmnTransformListener;
import org.camunda.bpm.engine.impl.cmmn.transformer.CmmnTransformer;
import org.camunda.bpm.engine.impl.cmmn.transformer.DefaultCmmnTransformFactory;
import org.camunda.bpm.engine.impl.db.DbIdGenerator;
import org.camunda.bpm.engine.impl.db.entitymanager.DbEntityManagerFactory;
import org.camunda.bpm.engine.impl.db.entitymanager.cache.DbEntityCacheKeyMapping;
import org.camunda.bpm.engine.impl.db.sql.DbSqlPersistenceProviderFactory;
import org.camunda.bpm.engine.impl.db.sql.DbSqlSessionFactory;
import org.camunda.bpm.engine.impl.delegate.DefaultDelegateInterceptor;
import org.camunda.bpm.engine.impl.digest.Default16ByteSaltGenerator;
import org.camunda.bpm.engine.impl.digest.PasswordEncryptor;
import org.camunda.bpm.engine.impl.digest.PasswordManager;
import org.camunda.bpm.engine.impl.digest.SaltGenerator;
import org.camunda.bpm.engine.impl.digest.Sha512HashDigest;
import org.camunda.bpm.engine.impl.dmn.batch.DeleteHistoricDecisionInstancesJobHandler;
import org.camunda.bpm.engine.impl.dmn.configuration.DmnEngineConfigurationBuilder;
import org.camunda.bpm.engine.impl.dmn.deployer.DecisionDefinitionDeployer;
import org.camunda.bpm.engine.impl.dmn.deployer.DecisionRequirementsDefinitionDeployer;
import org.camunda.bpm.engine.impl.dmn.entity.repository.DecisionDefinitionManager;
import org.camunda.bpm.engine.impl.dmn.entity.repository.DecisionRequirementsDefinitionManager;
import org.camunda.bpm.engine.impl.el.CommandContextFunctionMapper;
import org.camunda.bpm.engine.impl.el.DateTimeFunctionMapper;
import org.camunda.bpm.engine.impl.el.ExpressionManager;
import org.camunda.bpm.engine.impl.event.CompensationEventHandler;
import org.camunda.bpm.engine.impl.event.ConditionalEventHandler;
import org.camunda.bpm.engine.impl.event.EventHandler;
import org.camunda.bpm.engine.impl.event.EventHandlerImpl;
import org.camunda.bpm.engine.impl.event.EventType;
import org.camunda.bpm.engine.impl.event.SignalEventHandler;
import org.camunda.bpm.engine.impl.externaltask.DefaultExternalTaskPriorityProvider;
import org.camunda.bpm.engine.impl.form.engine.FormEngine;
import org.camunda.bpm.engine.impl.form.engine.HtmlFormEngine;
import org.camunda.bpm.engine.impl.form.engine.JuelFormEngine;
import org.camunda.bpm.engine.impl.form.type.AbstractFormFieldType;
import org.camunda.bpm.engine.impl.form.type.BooleanFormType;
import org.camunda.bpm.engine.impl.form.type.DateFormType;
import org.camunda.bpm.engine.impl.form.type.FormTypes;
import org.camunda.bpm.engine.impl.form.type.LongFormType;
import org.camunda.bpm.engine.impl.form.type.StringFormType;
import org.camunda.bpm.engine.impl.form.validator.FormFieldValidator;
import org.camunda.bpm.engine.impl.form.validator.FormValidators;
import org.camunda.bpm.engine.impl.form.validator.MaxLengthValidator;
import org.camunda.bpm.engine.impl.form.validator.MaxValidator;
import org.camunda.bpm.engine.impl.form.validator.MinLengthValidator;
import org.camunda.bpm.engine.impl.form.validator.MinValidator;
import org.camunda.bpm.engine.impl.form.validator.ReadOnlyValidator;
import org.camunda.bpm.engine.impl.form.validator.RequiredValidator;
import org.camunda.bpm.engine.impl.history.DefaultHistoryRemovalTimeProvider;
import org.camunda.bpm.engine.impl.history.HistoryLevel;
import org.camunda.bpm.engine.impl.history.HistoryRemovalTimeProvider;
import org.camunda.bpm.engine.impl.history.event.HistoricDecisionInstanceManager;
import org.camunda.bpm.engine.impl.history.handler.DbHistoryEventHandler;
import org.camunda.bpm.engine.impl.history.handler.HistoryEventHandler;
import org.camunda.bpm.engine.impl.history.parser.HistoryParseListener;
import org.camunda.bpm.engine.impl.history.producer.CacheAwareCmmnHistoryEventProducer;
import org.camunda.bpm.engine.impl.history.producer.CacheAwareHistoryEventProducer;
import org.camunda.bpm.engine.impl.history.producer.CmmnHistoryEventProducer;
import org.camunda.bpm.engine.impl.history.producer.DefaultDmnHistoryEventProducer;
import org.camunda.bpm.engine.impl.history.producer.DmnHistoryEventProducer;
import org.camunda.bpm.engine.impl.history.producer.HistoryEventProducer;
import org.camunda.bpm.engine.impl.history.transformer.CmmnHistoryTransformListener;
import org.camunda.bpm.engine.impl.identity.DefaultPasswordPolicyImpl;
import org.camunda.bpm.engine.impl.identity.ReadOnlyIdentityProvider;
import org.camunda.bpm.engine.impl.identity.WritableIdentityProvider;
import org.camunda.bpm.engine.impl.identity.db.DbIdentityServiceProvider;
import org.camunda.bpm.engine.impl.incident.DefaultIncidentHandler;
import org.camunda.bpm.engine.impl.incident.IncidentHandler;
import org.camunda.bpm.engine.impl.interceptor.CommandContextFactory;
import org.camunda.bpm.engine.impl.interceptor.CommandExecutor;
import org.camunda.bpm.engine.impl.interceptor.CommandExecutorImpl;
import org.camunda.bpm.engine.impl.interceptor.CommandInterceptor;
import org.camunda.bpm.engine.impl.interceptor.DelegateInterceptor;
import org.camunda.bpm.engine.impl.interceptor.SessionFactory;
import org.camunda.bpm.engine.impl.jobexecutor.AsyncContinuationJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.DefaultFailedJobCommandFactory;
import org.camunda.bpm.engine.impl.jobexecutor.DefaultJobExecutor;
import org.camunda.bpm.engine.impl.jobexecutor.DefaultJobPriorityProvider;
import org.camunda.bpm.engine.impl.jobexecutor.FailedJobCommandFactory;
import org.camunda.bpm.engine.impl.jobexecutor.JobDeclaration;
import org.camunda.bpm.engine.impl.jobexecutor.JobExecutor;
import org.camunda.bpm.engine.impl.jobexecutor.JobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.NotifyAcquisitionRejectedJobsHandler;
import org.camunda.bpm.engine.impl.jobexecutor.ProcessEventJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.RejectedJobsHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerActivateJobDefinitionHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerActivateProcessDefinitionHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerCatchIntermediateEventJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerExecuteNestedActivityJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerStartEventJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerStartEventSubprocessJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerSuspendJobDefinitionHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerSuspendProcessDefinitionHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerTaskListenerJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.BatchWindowManager;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.DefaultBatchWindowManager;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupBatch;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupHandler;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupHelper;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupJobHandler;
import org.camunda.bpm.engine.impl.metrics.MetricsRegistry;
import org.camunda.bpm.engine.impl.metrics.MetricsReporterIdProvider;
import org.camunda.bpm.engine.impl.metrics.SimpleIpBasedProvider;
import org.camunda.bpm.engine.impl.metrics.parser.MetricsBpmnParseListener;
import org.camunda.bpm.engine.impl.metrics.parser.MetricsCmmnTransformListener;
import org.camunda.bpm.engine.impl.metrics.reporter.DbMetricsReporter;
import org.camunda.bpm.engine.impl.migration.DefaultMigrationActivityMatcher;
import org.camunda.bpm.engine.impl.migration.DefaultMigrationInstructionGenerator;
import org.camunda.bpm.engine.impl.migration.MigrationActivityMatcher;
import org.camunda.bpm.engine.impl.migration.MigrationInstructionGenerator;
import org.camunda.bpm.engine.impl.migration.batch.MigrationBatchJobHandler;
import org.camunda.bpm.engine.impl.migration.validation.activity.MigrationActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.activity.NoCompensationHandlerActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.activity.SupportedActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.activity.SupportedPassiveEventTriggerActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.AsyncAfterMigrationValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.AsyncMigrationValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.AsyncProcessStartMigrationValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.MigratingActivityInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.MigratingCompensationInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.MigratingTransitionInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.NoUnmappedCompensationStartEventValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.NoUnmappedLeafInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.SupportedActivityInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.VariableConflictActivityInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.AdditionalFlowScopeInstructionValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.CannotAddMultiInstanceBodyValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.CannotAddMultiInstanceInnerActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.CannotRemoveMultiInstanceInnerActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.ConditionalEventUpdateEventTriggerValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.GatewayMappingValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.MigrationInstructionValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.OnlyOnceMappedActivityInstructionValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.SameBehaviorInstructionValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.SameEventScopeInstructionValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.SameEventTypeValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.UpdateEventTriggersValidator;
import org.camunda.bpm.engine.impl.optimize.OptimizeManager;
import org.camunda.bpm.engine.impl.persistence.GenericManagerFactory;
import org.camunda.bpm.engine.impl.persistence.deploy.Deployer;
import org.camunda.bpm.engine.impl.persistence.deploy.cache.CacheFactory;
import org.camunda.bpm.engine.impl.persistence.deploy.cache.DefaultCacheFactory;
import org.camunda.bpm.engine.impl.persistence.deploy.cache.DeploymentCache;
import org.camunda.bpm.engine.impl.persistence.entity.AttachmentManager;
import org.camunda.bpm.engine.impl.persistence.entity.AuthorizationManager;
import org.camunda.bpm.engine.impl.persistence.entity.BatchManager;
import org.camunda.bpm.engine.impl.persistence.entity.ByteArrayManager;
import org.camunda.bpm.engine.impl.persistence.entity.CommentManager;
import org.camunda.bpm.engine.impl.persistence.entity.DeploymentManager;
import org.camunda.bpm.engine.impl.persistence.entity.EventSubscriptionManager;
import org.camunda.bpm.engine.impl.persistence.entity.ExecutionManager;
import org.camunda.bpm.engine.impl.persistence.entity.ExternalTaskManager;
import org.camunda.bpm.engine.impl.persistence.entity.FilterManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricActivityInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricBatchManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricCaseActivityInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricCaseInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricDetailManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricExternalTaskLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricIdentityLinkLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricIncidentManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricJobLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricProcessInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricStatisticsManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricTaskInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricVariableInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.IdentityInfoManager;
import org.camunda.bpm.engine.impl.persistence.entity.IdentityLinkManager;
import org.camunda.bpm.engine.impl.persistence.entity.IncidentManager;
import org.camunda.bpm.engine.impl.persistence.entity.JobDefinitionManager;
import org.camunda.bpm.engine.impl.persistence.entity.JobManager;
import org.camunda.bpm.engine.impl.persistence.entity.MeterLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.ProcessDefinitionManager;
import org.camunda.bpm.engine.impl.persistence.entity.PropertyManager;
import org.camunda.bpm.engine.impl.persistence.entity.ReportManager;
import org.camunda.bpm.engine.impl.persistence.entity.ResourceManager;
import org.camunda.bpm.engine.impl.persistence.entity.SchemaLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.StatisticsManager;
import org.camunda.bpm.engine.impl.persistence.entity.TableDataManager;
import org.camunda.bpm.engine.impl.persistence.entity.TaskManager;
import org.camunda.bpm.engine.impl.persistence.entity.TaskReportManager;
import org.camunda.bpm.engine.impl.persistence.entity.TenantManager;
import org.camunda.bpm.engine.impl.persistence.entity.UserOperationLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.VariableInstanceManager;
import org.camunda.bpm.engine.impl.repository.DefaultDeploymentHandlerFactory;
import org.camunda.bpm.engine.impl.runtime.ConditionHandler;
import org.camunda.bpm.engine.impl.runtime.CorrelationHandler;
import org.camunda.bpm.engine.impl.runtime.DefaultConditionHandler;
import org.camunda.bpm.engine.impl.runtime.DefaultCorrelationHandler;
import org.camunda.bpm.engine.impl.runtime.DefaultDeserializationTypeValidator;
import org.camunda.bpm.engine.impl.scripting.ScriptFactory;
import org.camunda.bpm.engine.impl.scripting.engine.BeansResolverFactory;
import org.camunda.bpm.engine.impl.scripting.engine.ResolverFactory;
import org.camunda.bpm.engine.impl.scripting.engine.ScriptBindingsFactory;
import org.camunda.bpm.engine.impl.scripting.engine.ScriptingEngines;
import org.camunda.bpm.engine.impl.scripting.engine.VariableScopeResolverFactory;
import org.camunda.bpm.engine.impl.scripting.env.ScriptEnvResolver;
import org.camunda.bpm.engine.impl.scripting.env.ScriptingEnvironment;
import org.camunda.bpm.engine.impl.util.IoUtil;
import org.camunda.bpm.engine.impl.util.ParseUtil;
import org.camunda.bpm.engine.impl.util.ReflectUtil;
import org.camunda.bpm.engine.impl.variable.ValueTypeResolverImpl;
import org.camunda.bpm.engine.impl.variable.serializer.BooleanValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.ByteArrayValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.DateValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.DefaultVariableSerializers;
import org.camunda.bpm.engine.impl.variable.serializer.DoubleValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.FileValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.IntegerValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.JavaObjectSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.LongValueSerlializer;
import org.camunda.bpm.engine.impl.variable.serializer.NullValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.ShortValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.StringValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.TypedValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.VariableSerializerFactory;
import org.camunda.bpm.engine.impl.variable.serializer.VariableSerializers;
import org.camunda.bpm.engine.impl.variable.serializer.jpa.EntityManagerSession;
import org.camunda.bpm.engine.impl.variable.serializer.jpa.EntityManagerSessionFactory;
import org.camunda.bpm.engine.impl.variable.serializer.jpa.JPAVariableSerializer;
import org.camunda.bpm.engine.management.Metrics;
import org.camunda.bpm.engine.repository.DeploymentBuilder;
import org.camunda.bpm.engine.repository.DeploymentHandlerFactory;
import org.camunda.bpm.engine.runtime.Incident;
import org.camunda.bpm.engine.runtime.WhitelistingDeserializationTypeValidator;
import org.camunda.bpm.engine.test.mock.MocksResolverFactory;
import org.camunda.bpm.engine.variable.Variables;
import org.camunda.bpm.engine.variable.type.ValueType;
/**
* @author Tom Baeyens
*/
public abstract class ProcessEngineConfigurationImpl extends ProcessEngineConfiguration {
protected final static ConfigurationLogger LOG = ConfigurationLogger.CONFIG_LOGGER;
public static final String DB_SCHEMA_UPDATE_CREATE = "create";
public static final String DB_SCHEMA_UPDATE_DROP_CREATE = "drop-create";
public static final int HISTORYLEVEL_NONE = HistoryLevel.HISTORY_LEVEL_NONE.getId();
public static final int HISTORYLEVEL_ACTIVITY = HistoryLevel.HISTORY_LEVEL_ACTIVITY.getId();
public static final int HISTORYLEVEL_AUDIT = HistoryLevel.HISTORY_LEVEL_AUDIT.getId();
public static final int HISTORYLEVEL_FULL = HistoryLevel.HISTORY_LEVEL_FULL.getId();
public static final String DEFAULT_WS_SYNC_FACTORY = "org.camunda.bpm.engine.impl.webservice.CxfWebServiceClientFactory";
public static final String DEFAULT_MYBATIS_MAPPING_FILE = "org/camunda/bpm/engine/impl/mapping/mappings.xml";
public static final int DEFAULT_FAILED_JOB_LISTENER_MAX_RETRIES = 3;
public static SqlSessionFactory cachedSqlSessionFactory;
// SERVICES /////////////////////////////////////////////////////////////////
protected RepositoryService repositoryService = new RepositoryServiceImpl();
protected RuntimeService runtimeService = new RuntimeServiceImpl();
protected HistoryService historyService = new HistoryServiceImpl();
protected IdentityService identityService = new IdentityServiceImpl();
protected TaskService taskService = new TaskServiceImpl();
protected FormService formService = new FormServiceImpl();
protected ManagementService managementService = new ManagementServiceImpl();
protected AuthorizationService authorizationService = new AuthorizationServiceImpl();
protected CaseService caseService = new CaseServiceImpl();
protected FilterService filterService = new FilterServiceImpl();
protected ExternalTaskService externalTaskService = new ExternalTaskServiceImpl();
protected DecisionService decisionService = new DecisionServiceImpl();
protected OptimizeService optimizeService = new OptimizeService();
// COMMAND EXECUTORS ////////////////////////////////////////////////////////
// Command executor and interceptor stack
/**
* the configurable list which will be {@link #initInterceptorChain(java.util.List) processed} to build the {@link #commandExecutorTxRequired}
*/
protected List<CommandInterceptor> customPreCommandInterceptorsTxRequired;
protected List<CommandInterceptor> customPostCommandInterceptorsTxRequired;
protected List<CommandInterceptor> commandInterceptorsTxRequired;
/**
* this will be initialized during the configurationComplete()
*/
protected CommandExecutor commandExecutorTxRequired;
/**
* the configurable list which will be {@link #initInterceptorChain(List) processed} to build the {@link #commandExecutorTxRequiresNew}
*/
protected List<CommandInterceptor> customPreCommandInterceptorsTxRequiresNew;
protected List<CommandInterceptor> customPostCommandInterceptorsTxRequiresNew;
protected List<CommandInterceptor> commandInterceptorsTxRequiresNew;
/**
* this will be initialized during the configurationComplete()
*/
protected CommandExecutor commandExecutorTxRequiresNew;
/**
* Separate command executor to be used for db schema operations. Must always use NON-JTA transactions
*/
protected CommandExecutor commandExecutorSchemaOperations;
// SESSION FACTORIES ////////////////////////////////////////////////////////
protected List<SessionFactory> customSessionFactories;
protected DbSqlSessionFactory dbSqlSessionFactory;
protected Map<Class<?>, SessionFactory> sessionFactories;
// DEPLOYERS ////////////////////////////////////////////////////////////////
protected List<Deployer> customPreDeployers;
protected List<Deployer> customPostDeployers;
protected List<Deployer> deployers;
protected DeploymentCache deploymentCache;
// CACHE ////////////////////////////////////////////////////////////////////
protected CacheFactory cacheFactory;
protected int cacheCapacity = 1000;
protected boolean enableFetchProcessDefinitionDescription = true;
// JOB EXECUTOR /////////////////////////////////////////////////////////////
protected List<JobHandler> customJobHandlers;
protected Map<String, JobHandler> jobHandlers;
protected JobExecutor jobExecutor;
protected PriorityProvider<JobDeclaration<?, ?>> jobPriorityProvider;
// EXTERNAL TASK /////////////////////////////////////////////////////////////
protected PriorityProvider<ExternalTaskActivityBehavior> externalTaskPriorityProvider;
// MYBATIS SQL SESSION FACTORY //////////////////////////////////////////////
protected SqlSessionFactory sqlSessionFactory;
protected TransactionFactory transactionFactory;
// ID GENERATOR /////////////////////////////////////////////////////////////
protected IdGenerator idGenerator;
protected DataSource idGeneratorDataSource;
protected String idGeneratorDataSourceJndiName;
// INCIDENT HANDLER /////////////////////////////////////////////////////////
protected Map<String, IncidentHandler> incidentHandlers;
protected List<IncidentHandler> customIncidentHandlers;
// BATCH ////////////////////////////////////////////////////////////////////
protected Map<String, BatchJobHandler<?>> batchHandlers;
protected List<BatchJobHandler<?>> customBatchJobHandlers;
/**
* Number of jobs created by a batch seed job invocation
*/
protected int batchJobsPerSeed = 100;
/**
* Number of invocations executed by a single batch job
*/
protected int invocationsPerBatchJob = 1;
/**
* seconds to wait between polling for batch completion
*/
protected int batchPollTime = 30;
/**
* default priority for batch jobs
*/
protected long batchJobPriority = DefaultJobPriorityProvider.DEFAULT_PRIORITY;
// OTHER ////////////////////////////////////////////////////////////////////
protected List<FormEngine> customFormEngines;
protected Map<String, FormEngine> formEngines;
protected List<AbstractFormFieldType> customFormTypes;
protected FormTypes formTypes;
protected FormValidators formValidators;
protected Map<String, Class<? extends FormFieldValidator>> customFormFieldValidators;
protected List<TypedValueSerializer> customPreVariableSerializers;
protected List<TypedValueSerializer> customPostVariableSerializers;
protected VariableSerializers variableSerializers;
protected VariableSerializerFactory fallbackSerializerFactory;
protected String defaultSerializationFormat = Variables.SerializationDataFormats.JAVA.getName();
protected boolean javaSerializationFormatEnabled = false;
protected String defaultCharsetName = null;
protected Charset defaultCharset = null;
protected ExpressionManager expressionManager;
protected ScriptingEngines scriptingEngines;
protected List<ResolverFactory> resolverFactories;
protected ScriptingEnvironment scriptingEnvironment;
protected List<ScriptEnvResolver> scriptEnvResolvers;
protected ScriptFactory scriptFactory;
protected boolean autoStoreScriptVariables = false;
protected boolean enableScriptCompilation = true;
protected boolean enableScriptEngineCaching = true;
protected boolean enableFetchScriptEngineFromProcessApplication = true;
protected boolean cmmnEnabled = true;
protected boolean dmnEnabled = true;
protected boolean enableGracefulDegradationOnContextSwitchFailure = true;
protected BusinessCalendarManager businessCalendarManager;
protected String wsSyncFactoryClassName = DEFAULT_WS_SYNC_FACTORY;
protected CommandContextFactory commandContextFactory;
protected TransactionContextFactory transactionContextFactory;
protected BpmnParseFactory bpmnParseFactory;
// cmmn
protected CmmnTransformFactory cmmnTransformFactory;
protected DefaultCmmnElementHandlerRegistry cmmnElementHandlerRegistry;
// dmn
protected DefaultDmnEngineConfiguration dmnEngineConfiguration;
protected DmnEngine dmnEngine;
protected HistoryLevel historyLevel;
/**
* a list of supported history levels
*/
protected List<HistoryLevel> historyLevels;
/**
* a list of supported custom history levels
*/
protected List<HistoryLevel> customHistoryLevels;
protected List<BpmnParseListener> preParseListeners;
protected List<BpmnParseListener> postParseListeners;
protected List<CmmnTransformListener> customPreCmmnTransformListeners;
protected List<CmmnTransformListener> customPostCmmnTransformListeners;
protected Map<Object, Object> beans;
protected boolean isDbIdentityUsed = true;
protected boolean isDbHistoryUsed = true;
protected DelegateInterceptor delegateInterceptor;
protected CommandInterceptor actualCommandExecutor;
protected RejectedJobsHandler customRejectedJobsHandler;
protected Map<String, EventHandler> eventHandlers;
protected List<EventHandler> customEventHandlers;
protected FailedJobCommandFactory failedJobCommandFactory;
protected String databaseTablePrefix = "";
/**
* In some situations you want to set the schema to use for table checks / generation if the database metadata
* doesn't return that correctly, see https://jira.codehaus.org/browse/ACT-1220,
* https://jira.codehaus.org/browse/ACT-1062
*/
protected String databaseSchema = null;
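  // A minimal sketch, assuming the standard setters for the two properties above
  // (databaseTablePrefix / databaseSchema) and a ProcessEngineConfigurationImpl
  // instance named "configuration": run the engine against tables living in a
  // dedicated, schema-qualified prefix.
  //
  //   configuration.setDatabaseSchema("CAMUNDA");
  //   configuration.setDatabaseTablePrefix("CAMUNDA.");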
protected boolean isCreateDiagramOnDeploy = false;
protected ProcessApplicationManager processApplicationManager;
protected CorrelationHandler correlationHandler;
protected ConditionHandler conditionHandler;
/**
* session factory to be used for obtaining identity provider sessions
*/
protected SessionFactory identityProviderSessionFactory;
protected PasswordEncryptor passwordEncryptor;
protected List<PasswordEncryptor> customPasswordChecker;
protected PasswordManager passwordManager;
protected SaltGenerator saltGenerator;
protected Set<String> registeredDeployments;
protected DeploymentHandlerFactory deploymentHandlerFactory;
protected ResourceAuthorizationProvider resourceAuthorizationProvider;
protected List<ProcessEnginePlugin> processEnginePlugins = new ArrayList<>();
protected HistoryEventProducer historyEventProducer;
protected CmmnHistoryEventProducer cmmnHistoryEventProducer;
protected DmnHistoryEventProducer dmnHistoryEventProducer;
protected HistoryEventHandler historyEventHandler;
protected PermissionProvider permissionProvider;
protected boolean isExecutionTreePrefetchEnabled = true;
/**
* If true the process engine will attempt to acquire an exclusive lock before
* creating a deployment.
*/
protected boolean isDeploymentLockUsed = true;
/**
   * If true, several deployments will be processed strictly sequentially. When false, they may be processed in parallel.
*/
protected boolean isDeploymentSynchronized = true;
/**
* Allows setting whether the process engine should try reusing the first level entity cache.
   * The default setting is false; enabling it improves the performance of asynchronous continuations.
*/
protected boolean isDbEntityCacheReuseEnabled = false;
protected boolean isInvokeCustomVariableListeners = true;
/**
* The process engine created by this configuration.
*/
protected ProcessEngineImpl processEngine;
/**
* used to create instances for listeners, JavaDelegates, etc
*/
protected ArtifactFactory artifactFactory;
protected DbEntityCacheKeyMapping dbEntityCacheKeyMapping = DbEntityCacheKeyMapping.defaultEntityCacheKeyMapping();
/**
* the metrics registry
*/
protected MetricsRegistry metricsRegistry;
protected DbMetricsReporter dbMetricsReporter;
protected boolean isMetricsEnabled = true;
protected boolean isDbMetricsReporterActivate = true;
protected MetricsReporterIdProvider metricsReporterIdProvider;
/**
* handling of expressions submitted via API; can be used as guards against remote code execution
*/
protected boolean enableExpressionsInAdhocQueries = false;
protected boolean enableExpressionsInStoredQueries = true;
/**
* If false, disables XML eXternal Entity (XXE) Processing. This provides protection against XXE Processing attacks.
*/
protected boolean enableXxeProcessing = false;
/**
* If true, user operation log entries are only written if there is an
* authenticated user present in the context. If false, user operation log
* entries are written regardless of authentication state.
*/
protected boolean restrictUserOperationLogToAuthenticatedUsers = true;
protected boolean disableStrictCallActivityValidation = false;
protected boolean isBpmnStacktraceVerbose = false;
protected boolean forceCloseMybatisConnectionPool = true;
protected TenantIdProvider tenantIdProvider = null;
protected List<CommandChecker> commandCheckers = null;
protected List<String> adminGroups;
protected List<String> adminUsers;
// Migration
protected MigrationActivityMatcher migrationActivityMatcher;
protected List<MigrationActivityValidator> customPreMigrationActivityValidators;
protected List<MigrationActivityValidator> customPostMigrationActivityValidators;
protected MigrationInstructionGenerator migrationInstructionGenerator;
protected List<MigrationInstructionValidator> customPreMigrationInstructionValidators;
protected List<MigrationInstructionValidator> customPostMigrationInstructionValidators;
protected List<MigrationInstructionValidator> migrationInstructionValidators;
protected List<MigratingActivityInstanceValidator> customPreMigratingActivityInstanceValidators;
protected List<MigratingActivityInstanceValidator> customPostMigratingActivityInstanceValidators;
protected List<MigratingActivityInstanceValidator> migratingActivityInstanceValidators;
protected List<MigratingTransitionInstanceValidator> migratingTransitionInstanceValidators;
protected List<MigratingCompensationInstanceValidator> migratingCompensationInstanceValidators;
// Default user permission for task
protected Permission defaultUserPermissionForTask;
protected boolean isUseSharedSqlSessionFactory = false;
//History cleanup configuration
protected String historyCleanupBatchWindowStartTime;
protected String historyCleanupBatchWindowEndTime = "00:00";
protected Date historyCleanupBatchWindowStartTimeAsDate;
protected Date historyCleanupBatchWindowEndTimeAsDate;
protected Map<Integer, BatchWindowConfiguration> historyCleanupBatchWindows = new HashMap<>();
//shortcuts for batch windows configuration available to be configured from XML
protected String mondayHistoryCleanupBatchWindowStartTime;
protected String mondayHistoryCleanupBatchWindowEndTime;
protected String tuesdayHistoryCleanupBatchWindowStartTime;
protected String tuesdayHistoryCleanupBatchWindowEndTime;
protected String wednesdayHistoryCleanupBatchWindowStartTime;
protected String wednesdayHistoryCleanupBatchWindowEndTime;
protected String thursdayHistoryCleanupBatchWindowStartTime;
protected String thursdayHistoryCleanupBatchWindowEndTime;
protected String fridayHistoryCleanupBatchWindowStartTime;
protected String fridayHistoryCleanupBatchWindowEndTime;
protected String saturdayHistoryCleanupBatchWindowStartTime;
protected String saturdayHistoryCleanupBatchWindowEndTime;
protected String sundayHistoryCleanupBatchWindowStartTime;
protected String sundayHistoryCleanupBatchWindowEndTime;
protected int historyCleanupDegreeOfParallelism = 1;
protected String historyTimeToLive;
protected String batchOperationHistoryTimeToLive;
protected Map<String, String> batchOperationsForHistoryCleanup;
protected Map<String, Integer> parsedBatchOperationsForHistoryCleanup;
protected BatchWindowManager batchWindowManager = new DefaultBatchWindowManager();
protected HistoryRemovalTimeProvider historyRemovalTimeProvider;
protected String historyRemovalTimeStrategy;
protected String historyCleanupStrategy;
/**
* Size of batch in which history cleanup data will be deleted. {@link HistoryCleanupBatch#MAX_BATCH_SIZE} must be respected.
*/
private int historyCleanupBatchSize = 500;
/**
* Indicates the minimal amount of data to trigger the history cleanup.
*/
private int historyCleanupBatchThreshold = 10;
private boolean historyCleanupMetricsEnabled = true;
private int failedJobListenerMaxRetries = DEFAULT_FAILED_JOB_LISTENER_MAX_RETRIES;
protected String failedJobRetryTimeCycle;
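  // A minimal sketch, assuming the usual setter for the failedJobRetryTimeCycle field
  // above and a configuration instance named "configuration": retry a failed job five
  // times with five minutes between attempts (ISO-8601 repetition syntax).
  //
  //   configuration.setFailedJobRetryTimeCycle("R5/PT5M");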
// login attempts ///////////////////////////////////////////////////////
protected int loginMaxAttempts = 10;
protected int loginDelayFactor = 2;
protected int loginDelayMaxTime = 60;
protected int loginDelayBase = 3;
// max results limit
protected int queryMaxResultsLimit = Integer.MAX_VALUE;
// logging context property names (with default values)
protected String loggingContextActivityId = "activityId";
protected String loggingContextApplicationName = "applicationName";
  protected String loggingContextBusinessKey; // default == null => disabled by default
protected String loggingContextProcessDefinitionId = "processDefinitionId";
protected String loggingContextProcessInstanceId = "processInstanceId";
protected String loggingContextTenantId = "tenantId";
// buildProcessEngine ///////////////////////////////////////////////////////
@Override
public ProcessEngine buildProcessEngine() {
init();
processEngine = new ProcessEngineImpl(this);
invokePostProcessEngineBuild(processEngine);
return processEngine;
}
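  // A minimal usage sketch, assuming the standard factory methods of the public
  // ProcessEngineConfiguration API: build an engine from an in-memory configuration
  // and close it when it is no longer needed.
  //
  //   ProcessEngineConfiguration configuration = ProcessEngineConfiguration
  //       .createStandaloneInMemProcessEngineConfiguration()
  //       .setJdbcUrl("jdbc:h2:mem:camunda;DB_CLOSE_DELAY=1000")
  //       .setDatabaseSchemaUpdate(ProcessEngineConfiguration.DB_SCHEMA_UPDATE_TRUE);
  //   ProcessEngine engine = configuration.buildProcessEngine();
  //   // ... engine.getRuntimeService(), engine.getRepositoryService(), ...
  //   engine.close();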
// init /////////////////////////////////////////////////////////////////////
protected void init() {
invokePreInit();
initDefaultCharset();
initHistoryLevel();
initHistoryEventProducer();
initCmmnHistoryEventProducer();
initDmnHistoryEventProducer();
initHistoryEventHandler();
initExpressionManager();
initBeans();
initArtifactFactory();
initFormEngines();
initFormTypes();
initFormFieldValidators();
initScripting();
initDmnEngine();
initBusinessCalendarManager();
initCommandContextFactory();
initTransactionContextFactory();
initCommandExecutors();
initServices();
initIdGenerator();
initFailedJobCommandFactory();
initDeployers();
initJobProvider();
initExternalTaskPriorityProvider();
initBatchHandlers();
initJobExecutor();
initDataSource();
initTransactionFactory();
initSqlSessionFactory();
initIdentityProviderSessionFactory();
initSessionFactories();
initValueTypeResolver();
initTypeValidator();
initSerialization();
initJpa();
initDelegateInterceptor();
initEventHandlers();
initProcessApplicationManager();
initCorrelationHandler();
initConditionHandler();
initIncidentHandlers();
initPasswordDigest();
initDeploymentRegistration();
initDeploymentHandlerFactory();
initResourceAuthorizationProvider();
initPermissionProvider();
initMetrics();
initMigration();
initCommandCheckers();
initDefaultUserPermissionForTask();
initHistoryRemovalTime();
initHistoryCleanup();
initAdminUser();
initAdminGroups();
initPasswordPolicy();
invokePostInit();
}
protected void initTypeValidator() {
if (deserializationTypeValidator == null) {
deserializationTypeValidator = new DefaultDeserializationTypeValidator();
}
if (deserializationTypeValidator instanceof WhitelistingDeserializationTypeValidator) {
WhitelistingDeserializationTypeValidator validator = (WhitelistingDeserializationTypeValidator) deserializationTypeValidator;
validator.setAllowedClasses(deserializationAllowedClasses);
validator.setAllowedPackages(deserializationAllowedPackages);
}
}
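  // A minimal sketch, assuming setters matching the deserializationAllowedClasses and
  // deserializationAllowedPackages fields referenced above (the org.example names are
  // placeholders): restrict which types Java-serialized variable values may be
  // deserialized into.
  //
  //   configuration.setDeserializationAllowedClasses("org.example.OrderDto,org.example.CustomerDto");
  //   configuration.setDeserializationAllowedPackages("org.example.api");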
public void initHistoryRemovalTime() {
initHistoryRemovalTimeProvider();
initHistoryRemovalTimeStrategy();
}
public void initHistoryRemovalTimeStrategy() {
if (historyRemovalTimeStrategy == null) {
historyRemovalTimeStrategy = HISTORY_REMOVAL_TIME_STRATEGY_END;
}
if (!HISTORY_REMOVAL_TIME_STRATEGY_START.equals(historyRemovalTimeStrategy) &&
!HISTORY_REMOVAL_TIME_STRATEGY_END.equals(historyRemovalTimeStrategy) &&
!HISTORY_REMOVAL_TIME_STRATEGY_NONE.equals(historyRemovalTimeStrategy)) {
throw LOG.invalidPropertyValue("historyRemovalTimeStrategy", String.valueOf(historyRemovalTimeStrategy),
String.format("history removal time strategy must be set to '%s', '%s' or '%s'", HISTORY_REMOVAL_TIME_STRATEGY_START, HISTORY_REMOVAL_TIME_STRATEGY_END, HISTORY_REMOVAL_TIME_STRATEGY_NONE));
}
}
public void initHistoryRemovalTimeProvider() {
if (historyRemovalTimeProvider == null) {
historyRemovalTimeProvider = new DefaultHistoryRemovalTimeProvider();
}
}
public void initHistoryCleanup() {
initHistoryCleanupStrategy();
//validate number of threads
if (historyCleanupDegreeOfParallelism < 1 || historyCleanupDegreeOfParallelism > MAX_THREADS_NUMBER) {
throw LOG.invalidPropertyValue("historyCleanupDegreeOfParallelism", String.valueOf(historyCleanupDegreeOfParallelism),
String.format("value for number of threads for history cleanup should be between 1 and %s", HistoryCleanupCmd.MAX_THREADS_NUMBER));
}
if (historyCleanupBatchWindowStartTime != null) {
initHistoryCleanupBatchWindowStartTime();
}
if (historyCleanupBatchWindowEndTime != null) {
initHistoryCleanupBatchWindowEndTime();
}
initHistoryCleanupBatchWindowsMap();
if (historyCleanupBatchSize > HistoryCleanupHandler.MAX_BATCH_SIZE || historyCleanupBatchSize <= 0) {
throw LOG.invalidPropertyValue("historyCleanupBatchSize", String.valueOf(historyCleanupBatchSize),
String.format("value for batch size should be between 1 and %s", HistoryCleanupHandler.MAX_BATCH_SIZE));
}
if (historyCleanupBatchThreshold < 0) {
throw LOG.invalidPropertyValue("historyCleanupBatchThreshold", String.valueOf(historyCleanupBatchThreshold),
"History cleanup batch threshold cannot be negative.");
}
initHistoryTimeToLive();
initBatchOperationsHistoryTimeToLive();
}
protected void initHistoryCleanupStrategy() {
if (historyCleanupStrategy == null) {
historyCleanupStrategy = HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED;
}
if (!HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED.equals(historyCleanupStrategy) &&
!HISTORY_CLEANUP_STRATEGY_END_TIME_BASED.equals(historyCleanupStrategy)) {
throw LOG.invalidPropertyValue("historyCleanupStrategy", String.valueOf(historyCleanupStrategy),
String.format("history cleanup strategy must be either set to '%s' or '%s'", HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED, HISTORY_CLEANUP_STRATEGY_END_TIME_BASED));
}
if (HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED.equals(historyCleanupStrategy) &&
HISTORY_REMOVAL_TIME_STRATEGY_NONE.equals(historyRemovalTimeStrategy)) {
throw LOG.invalidPropertyValue("historyRemovalTimeStrategy", String.valueOf(historyRemovalTimeStrategy),
String.format("history removal time strategy cannot be set to '%s' in conjunction with '%s' history cleanup strategy", HISTORY_REMOVAL_TIME_STRATEGY_NONE, HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED));
}
}
private void initHistoryCleanupBatchWindowsMap() {
if (mondayHistoryCleanupBatchWindowStartTime != null || mondayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.MONDAY, new BatchWindowConfiguration(mondayHistoryCleanupBatchWindowStartTime, mondayHistoryCleanupBatchWindowEndTime));
}
if (tuesdayHistoryCleanupBatchWindowStartTime != null || tuesdayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.TUESDAY, new BatchWindowConfiguration(tuesdayHistoryCleanupBatchWindowStartTime, tuesdayHistoryCleanupBatchWindowEndTime));
}
if (wednesdayHistoryCleanupBatchWindowStartTime != null || wednesdayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.WEDNESDAY, new BatchWindowConfiguration(wednesdayHistoryCleanupBatchWindowStartTime, wednesdayHistoryCleanupBatchWindowEndTime));
}
if (thursdayHistoryCleanupBatchWindowStartTime != null || thursdayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.THURSDAY, new BatchWindowConfiguration(thursdayHistoryCleanupBatchWindowStartTime, thursdayHistoryCleanupBatchWindowEndTime));
}
if (fridayHistoryCleanupBatchWindowStartTime != null || fridayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.FRIDAY, new BatchWindowConfiguration(fridayHistoryCleanupBatchWindowStartTime, fridayHistoryCleanupBatchWindowEndTime));
}
    if (saturdayHistoryCleanupBatchWindowStartTime != null || saturdayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.SATURDAY, new BatchWindowConfiguration(saturdayHistoryCleanupBatchWindowStartTime, saturdayHistoryCleanupBatchWindowEndTime));
}
if (sundayHistoryCleanupBatchWindowStartTime != null || sundayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.SUNDAY, new BatchWindowConfiguration(sundayHistoryCleanupBatchWindowStartTime, sundayHistoryCleanupBatchWindowEndTime));
}
}
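  // A minimal sketch, assuming the standard setters for the batch-window properties
  // used above: clean up history nightly between 20:00 and 06:00, with a full-day
  // window on Sundays.
  //
  //   configuration.setHistoryCleanupBatchWindowStartTime("20:00");
  //   configuration.setHistoryCleanupBatchWindowEndTime("06:00");
  //   configuration.setSundayHistoryCleanupBatchWindowStartTime("00:00");
  //   configuration.setSundayHistoryCleanupBatchWindowEndTime("23:59");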
protected void initHistoryTimeToLive() {
try {
ParseUtil.parseHistoryTimeToLive(historyTimeToLive);
} catch (Exception e) {
throw LOG.invalidPropertyValue("historyTimeToLive", historyTimeToLive, e);
}
}
protected void initBatchOperationsHistoryTimeToLive() {
try {
ParseUtil.parseHistoryTimeToLive(batchOperationHistoryTimeToLive);
} catch (Exception e) {
throw LOG.invalidPropertyValue("batchOperationHistoryTimeToLive", batchOperationHistoryTimeToLive, e);
}
if (batchOperationsForHistoryCleanup == null) {
batchOperationsForHistoryCleanup = new HashMap<>();
} else {
for (String batchOperation : batchOperationsForHistoryCleanup.keySet()) {
String timeToLive = batchOperationsForHistoryCleanup.get(batchOperation);
if (!batchHandlers.keySet().contains(batchOperation)) {
LOG.invalidBatchOperation(batchOperation, timeToLive);
}
try {
ParseUtil.parseHistoryTimeToLive(timeToLive);
} catch (Exception e) {
throw LOG.invalidPropertyValue("history time to live for " + batchOperation + " batch operations", timeToLive, e);
}
}
}
if (batchHandlers != null && batchOperationHistoryTimeToLive != null) {
for (String batchOperation : batchHandlers.keySet()) {
if (!batchOperationsForHistoryCleanup.containsKey(batchOperation)) {
batchOperationsForHistoryCleanup.put(batchOperation, batchOperationHistoryTimeToLive);
}
}
}
parsedBatchOperationsForHistoryCleanup = new HashMap<>();
if (batchOperationsForHistoryCleanup != null) {
for (String operation : batchOperationsForHistoryCleanup.keySet()) {
Integer historyTimeToLive = ParseUtil.parseHistoryTimeToLive(batchOperationsForHistoryCleanup.get(operation));
parsedBatchOperationsForHistoryCleanup.put(operation, historyTimeToLive);
}
}
}
private void initHistoryCleanupBatchWindowEndTime() {
try {
historyCleanupBatchWindowEndTimeAsDate = HistoryCleanupHelper.parseTimeConfiguration(historyCleanupBatchWindowEndTime);
} catch (ParseException e) {
throw LOG.invalidPropertyValue("historyCleanupBatchWindowEndTime", historyCleanupBatchWindowEndTime);
}
}
private void initHistoryCleanupBatchWindowStartTime() {
try {
historyCleanupBatchWindowStartTimeAsDate = HistoryCleanupHelper.parseTimeConfiguration(historyCleanupBatchWindowStartTime);
} catch (ParseException e) {
throw LOG.invalidPropertyValue("historyCleanupBatchWindowStartTime", historyCleanupBatchWindowStartTime);
}
}
protected void invokePreInit() {
for (ProcessEnginePlugin plugin : processEnginePlugins) {
LOG.pluginActivated(plugin.toString(), getProcessEngineName());
plugin.preInit(this);
}
}
protected void invokePostInit() {
for (ProcessEnginePlugin plugin : processEnginePlugins) {
plugin.postInit(this);
}
}
protected void invokePostProcessEngineBuild(ProcessEngine engine) {
for (ProcessEnginePlugin plugin : processEnginePlugins) {
plugin.postProcessEngineBuild(engine);
}
}
// failedJobCommandFactory ////////////////////////////////////////////////////////
protected void initFailedJobCommandFactory() {
if (failedJobCommandFactory == null) {
failedJobCommandFactory = new DefaultFailedJobCommandFactory();
}
if (postParseListeners == null) {
postParseListeners = new ArrayList<>();
}
postParseListeners.add(new DefaultFailedJobParseListener());
}
// incident handlers /////////////////////////////////////////////////////////////
protected void initIncidentHandlers() {
if (incidentHandlers == null) {
incidentHandlers = new HashMap<>();
DefaultIncidentHandler failedJobIncidentHandler = new DefaultIncidentHandler(Incident.FAILED_JOB_HANDLER_TYPE);
incidentHandlers.put(failedJobIncidentHandler.getIncidentHandlerType(), failedJobIncidentHandler);
DefaultIncidentHandler failedExternalTaskIncidentHandler = new DefaultIncidentHandler(Incident.EXTERNAL_TASK_HANDLER_TYPE);
incidentHandlers.put(failedExternalTaskIncidentHandler.getIncidentHandlerType(), failedExternalTaskIncidentHandler);
}
if (customIncidentHandlers != null) {
for (IncidentHandler incidentHandler : customIncidentHandlers) {
incidentHandlers.put(incidentHandler.getIncidentHandlerType(), incidentHandler);
}
}
}
// batch ///////////////////////////////////////////////////////////////////////
protected void initBatchHandlers() {
if (batchHandlers == null) {
batchHandlers = new HashMap<>();
MigrationBatchJobHandler migrationHandler = new MigrationBatchJobHandler();
batchHandlers.put(migrationHandler.getType(), migrationHandler);
ModificationBatchJobHandler modificationHandler = new ModificationBatchJobHandler();
batchHandlers.put(modificationHandler.getType(), modificationHandler);
DeleteProcessInstancesJobHandler deleteProcessJobHandler = new DeleteProcessInstancesJobHandler();
batchHandlers.put(deleteProcessJobHandler.getType(), deleteProcessJobHandler);
DeleteHistoricProcessInstancesJobHandler deleteHistoricProcessInstancesJobHandler = new DeleteHistoricProcessInstancesJobHandler();
batchHandlers.put(deleteHistoricProcessInstancesJobHandler.getType(), deleteHistoricProcessInstancesJobHandler);
SetJobRetriesJobHandler setJobRetriesJobHandler = new SetJobRetriesJobHandler();
batchHandlers.put(setJobRetriesJobHandler.getType(), setJobRetriesJobHandler);
SetExternalTaskRetriesJobHandler setExternalTaskRetriesJobHandler = new SetExternalTaskRetriesJobHandler();
batchHandlers.put(setExternalTaskRetriesJobHandler.getType(), setExternalTaskRetriesJobHandler);
RestartProcessInstancesJobHandler restartProcessInstancesJobHandler = new RestartProcessInstancesJobHandler();
batchHandlers.put(restartProcessInstancesJobHandler.getType(), restartProcessInstancesJobHandler);
UpdateProcessInstancesSuspendStateJobHandler suspendProcessInstancesJobHandler = new UpdateProcessInstancesSuspendStateJobHandler();
batchHandlers.put(suspendProcessInstancesJobHandler.getType(), suspendProcessInstancesJobHandler);
DeleteHistoricDecisionInstancesJobHandler deleteHistoricDecisionInstancesJobHandler = new DeleteHistoricDecisionInstancesJobHandler();
batchHandlers.put(deleteHistoricDecisionInstancesJobHandler.getType(), deleteHistoricDecisionInstancesJobHandler);
ProcessSetRemovalTimeJobHandler processSetRemovalTimeJobHandler = new ProcessSetRemovalTimeJobHandler();
batchHandlers.put(processSetRemovalTimeJobHandler.getType(), processSetRemovalTimeJobHandler);
DecisionSetRemovalTimeJobHandler decisionSetRemovalTimeJobHandler = new DecisionSetRemovalTimeJobHandler();
batchHandlers.put(decisionSetRemovalTimeJobHandler.getType(), decisionSetRemovalTimeJobHandler);
BatchSetRemovalTimeJobHandler batchSetRemovalTimeJobHandler = new BatchSetRemovalTimeJobHandler();
batchHandlers.put(batchSetRemovalTimeJobHandler.getType(), batchSetRemovalTimeJobHandler);
}
if (customBatchJobHandlers != null) {
for (BatchJobHandler<?> customBatchJobHandler : customBatchJobHandlers) {
batchHandlers.put(customBatchJobHandler.getType(), customBatchJobHandler);
}
}
}
// command executors ////////////////////////////////////////////////////////
protected abstract Collection<? extends CommandInterceptor> getDefaultCommandInterceptorsTxRequired();
protected abstract Collection<? extends CommandInterceptor> getDefaultCommandInterceptorsTxRequiresNew();
protected void initCommandExecutors() {
initActualCommandExecutor();
initCommandInterceptorsTxRequired();
initCommandExecutorTxRequired();
initCommandInterceptorsTxRequiresNew();
initCommandExecutorTxRequiresNew();
initCommandExecutorDbSchemaOperations();
}
protected void initActualCommandExecutor() {
actualCommandExecutor = new CommandExecutorImpl();
}
protected void initCommandInterceptorsTxRequired() {
if (commandInterceptorsTxRequired == null) {
if (customPreCommandInterceptorsTxRequired != null) {
commandInterceptorsTxRequired = new ArrayList<>(customPreCommandInterceptorsTxRequired);
} else {
commandInterceptorsTxRequired = new ArrayList<>();
}
commandInterceptorsTxRequired.addAll(getDefaultCommandInterceptorsTxRequired());
if (customPostCommandInterceptorsTxRequired != null) {
commandInterceptorsTxRequired.addAll(customPostCommandInterceptorsTxRequired);
}
commandInterceptorsTxRequired.add(actualCommandExecutor);
}
}
protected void initCommandInterceptorsTxRequiresNew() {
if (commandInterceptorsTxRequiresNew == null) {
if (customPreCommandInterceptorsTxRequiresNew != null) {
commandInterceptorsTxRequiresNew = new ArrayList<>(customPreCommandInterceptorsTxRequiresNew);
} else {
commandInterceptorsTxRequiresNew = new ArrayList<>();
}
commandInterceptorsTxRequiresNew.addAll(getDefaultCommandInterceptorsTxRequiresNew());
if (customPostCommandInterceptorsTxRequiresNew != null) {
commandInterceptorsTxRequiresNew.addAll(customPostCommandInterceptorsTxRequiresNew);
}
commandInterceptorsTxRequiresNew.add(actualCommandExecutor);
}
}
protected void initCommandExecutorTxRequired() {
if (commandExecutorTxRequired == null) {
commandExecutorTxRequired = initInterceptorChain(commandInterceptorsTxRequired);
}
}
protected void initCommandExecutorTxRequiresNew() {
if (commandExecutorTxRequiresNew == null) {
commandExecutorTxRequiresNew = initInterceptorChain(commandInterceptorsTxRequiresNew);
}
}
protected void initCommandExecutorDbSchemaOperations() {
if (commandExecutorSchemaOperations == null) {
      // In the default case, we use the same command executor for DB schema operations as for runtime operations.
      // Configurations that use JTA transactions should override this method and provide a custom command executor
      // that uses non-JTA transactions.
commandExecutorSchemaOperations = commandExecutorTxRequired;
}
}
protected CommandInterceptor initInterceptorChain(List<CommandInterceptor> chain) {
if (chain == null || chain.isEmpty()) {
throw new ProcessEngineException("invalid command interceptor chain configuration: " + chain);
}
for (int i = 0; i < chain.size() - 1; i++) {
chain.get(i).setNext(chain.get(i + 1));
}
return chain.get(0);
}
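  // A minimal sketch, assuming the usual getter/setter pair for the
  // customPreCommandInterceptorsTxRequired field: an interceptor registered this way is
  // linked in front of the default interceptors and the actualCommandExecutor by the
  // chain-building logic above (MyAuditingInterceptor is a hypothetical
  // CommandInterceptor subclass).
  //
  //   configuration.setCustomPreCommandInterceptorsTxRequired(
  //       Collections.<CommandInterceptor>singletonList(new MyAuditingInterceptor()));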
// services /////////////////////////////////////////////////////////////////
protected void initServices() {
initService(repositoryService);
initService(runtimeService);
initService(historyService);
initService(identityService);
initService(taskService);
initService(formService);
initService(managementService);
initService(authorizationService);
initService(caseService);
initService(filterService);
initService(externalTaskService);
initService(decisionService);
initService(optimizeService);
}
protected void initService(Object service) {
if (service instanceof ServiceImpl) {
((ServiceImpl) service).setCommandExecutor(commandExecutorTxRequired);
}
if (service instanceof RepositoryServiceImpl) {
((RepositoryServiceImpl) service).setDeploymentCharset(getDefaultCharset());
}
}
// DataSource ///////////////////////////////////////////////////////////////
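// Resolves the data source in the following order: an explicitly configured DataSource,
// a JNDI lookup via dataSourceJndiName, or a MyBatis PooledDataSource built from the JDBC properties.
// Afterwards the database type is determined if it has not been configured explicitly.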
protected void initDataSource() {
if (dataSource == null) {
if (dataSourceJndiName != null) {
try {
dataSource = (DataSource) new InitialContext().lookup(dataSourceJndiName);
} catch (Exception e) {
throw new ProcessEngineException("couldn't lookup datasource from " + dataSourceJndiName + ": " + e.getMessage(), e);
}
} else if (jdbcUrl != null) {
if ((jdbcDriver == null) || (jdbcUrl == null) || (jdbcUsername == null)) {
throw new ProcessEngineException("DataSource or JDBC properties have to be specified in a process engine configuration");
}
PooledDataSource pooledDataSource =
new PooledDataSource(ReflectUtil.getClassLoader(), jdbcDriver, jdbcUrl, jdbcUsername, jdbcPassword);
if (jdbcMaxActiveConnections > 0) {
pooledDataSource.setPoolMaximumActiveConnections(jdbcMaxActiveConnections);
}
if (jdbcMaxIdleConnections > 0) {
pooledDataSource.setPoolMaximumIdleConnections(jdbcMaxIdleConnections);
}
if (jdbcMaxCheckoutTime > 0) {
pooledDataSource.setPoolMaximumCheckoutTime(jdbcMaxCheckoutTime);
}
if (jdbcMaxWaitTime > 0) {
pooledDataSource.setPoolTimeToWait(jdbcMaxWaitTime);
}
if (jdbcPingEnabled) {
pooledDataSource.setPoolPingEnabled(true);
if (jdbcPingQuery != null) {
pooledDataSource.setPoolPingQuery(jdbcPingQuery);
}
pooledDataSource.setPoolPingConnectionsNotUsedFor(jdbcPingConnectionNotUsedFor);
}
dataSource = pooledDataSource;
}
if (dataSource instanceof PooledDataSource) {
// ACT-233: the connection pool of iBatis is not properly initialized if this is not called!
((PooledDataSource) dataSource).forceCloseAll();
}
}
if (databaseType == null) {
initDatabaseType();
}
}
protected static Properties databaseTypeMappings = getDefaultDatabaseTypeMappings();
protected static final String MY_SQL_PRODUCT_NAME = "MySQL";
protected static final String MARIA_DB_PRODUCT_NAME = "MariaDB";
protected static Properties getDefaultDatabaseTypeMappings() {
Properties databaseTypeMappings = new Properties();
databaseTypeMappings.setProperty("H2", "h2");
databaseTypeMappings.setProperty(MY_SQL_PRODUCT_NAME, "mysql");
databaseTypeMappings.setProperty(MARIA_DB_PRODUCT_NAME, "mariadb");
databaseTypeMappings.setProperty("Oracle", "oracle");
databaseTypeMappings.setProperty("PostgreSQL", "postgres");
databaseTypeMappings.setProperty("Microsoft SQL Server", "mssql");
databaseTypeMappings.setProperty("DB2", "db2");
databaseTypeMappings.setProperty("DB2", "db2");
databaseTypeMappings.setProperty("DB2/NT", "db2");
databaseTypeMappings.setProperty("DB2/NT64", "db2");
databaseTypeMappings.setProperty("DB2 UDP", "db2");
databaseTypeMappings.setProperty("DB2/LINUX", "db2");
databaseTypeMappings.setProperty("DB2/LINUX390", "db2");
databaseTypeMappings.setProperty("DB2/LINUXX8664", "db2");
databaseTypeMappings.setProperty("DB2/LINUXZ64", "db2");
databaseTypeMappings.setProperty("DB2/400 SQL", "db2");
databaseTypeMappings.setProperty("DB2/6000", "db2");
databaseTypeMappings.setProperty("DB2 UDB iSeries", "db2");
databaseTypeMappings.setProperty("DB2/AIX64", "db2");
databaseTypeMappings.setProperty("DB2/HPUX", "db2");
databaseTypeMappings.setProperty("DB2/HP64", "db2");
databaseTypeMappings.setProperty("DB2/SUN", "db2");
databaseTypeMappings.setProperty("DB2/SUN64", "db2");
databaseTypeMappings.setProperty("DB2/PTX", "db2");
databaseTypeMappings.setProperty("DB2/2", "db2");
return databaseTypeMappings;
}
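/**
 * Determines the database type from the JDBC metadata product name using the mappings above.
 * Since MariaDB reports its product name as 'MySQL', {@link #checkForMariaDb} is consulted
 * to tell the two apart.
 */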
public void initDatabaseType() {
Connection connection = null;
try {
connection = dataSource.getConnection();
DatabaseMetaData databaseMetaData = connection.getMetaData();
String databaseProductName = databaseMetaData.getDatabaseProductName();
if (MY_SQL_PRODUCT_NAME.equals(databaseProductName)) {
databaseProductName = checkForMariaDb(databaseMetaData, databaseProductName);
}
LOG.debugDatabaseproductName(databaseProductName);
databaseType = databaseTypeMappings.getProperty(databaseProductName);
ensureNotNull("couldn't deduct database type from database product name '" + databaseProductName + "'", "databaseType", databaseType);
LOG.debugDatabaseType(databaseType);
} catch (SQLException e) {
LOG.databaseConnectionAccessException(e);
} finally {
try {
if (connection != null) {
connection.close();
}
} catch (SQLException e) {
LOG.databaseConnectionCloseException(e);
}
}
}
/**
* The product name reported by MariaDB is still 'MySQL'. This method
* tries to find evidence that the database actually is MariaDB. If it is
* successful it returns "MariaDB", otherwise the provided database name.
*/
protected String checkForMariaDb(DatabaseMetaData databaseMetaData, String databaseName) {
try {
String databaseProductVersion = databaseMetaData.getDatabaseProductVersion();
if (databaseProductVersion != null && databaseProductVersion.toLowerCase().contains("mariadb")) {
return MARIA_DB_PRODUCT_NAME;
}
} catch (SQLException ignore) {
}
try {
String driverName = databaseMetaData.getDriverName();
if (driverName != null && driverName.toLowerCase().contains("mariadb")) {
return MARIA_DB_PRODUCT_NAME;
}
} catch (SQLException ignore) {
}
String metaDataClassName = databaseMetaData.getClass().getName();
if (metaDataClassName != null && metaDataClassName.toLowerCase().contains("mariadb")) {
return MARIA_DB_PRODUCT_NAME;
}
return databaseName;
}
// myBatis SqlSessionFactory ////////////////////////////////////////////////
protected void initTransactionFactory() {
if (transactionFactory == null) {
if (transactionsExternallyManaged) {
transactionFactory = new ManagedTransactionFactory();
} else {
transactionFactory = new JdbcTransactionFactory();
}
}
}
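// Builds the MyBatis SqlSessionFactory from the mapping XML. When isUseSharedSqlSessionFactory is set,
// the factory is built once, cached and reused, and the table prefix is resolved at runtime from the
// current process engine configuration; otherwise the configured databaseTablePrefix is used directly.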
protected void initSqlSessionFactory() {
// to protect access to cachedSqlSessionFactory see CAM-6682
synchronized (ProcessEngineConfigurationImpl.class) {
if (isUseSharedSqlSessionFactory) {
sqlSessionFactory = cachedSqlSessionFactory;
}
if (sqlSessionFactory == null) {
InputStream inputStream = null;
try {
inputStream = getMyBatisXmlConfigurationSteam();
// update the jdbc parameters to the configured ones...
Environment environment = new Environment("default", transactionFactory, dataSource);
Reader reader = new InputStreamReader(inputStream);
Properties properties = new Properties();
if (isUseSharedSqlSessionFactory) {
properties.put("prefix", "${@org.camunda.bpm.engine.impl.context.Context@getProcessEngineConfiguration().databaseTablePrefix}");
} else {
properties.put("prefix", databaseTablePrefix);
}
initSqlSessionFactoryProperties(properties, databaseTablePrefix, databaseType);
XMLConfigBuilder parser = new XMLConfigBuilder(reader, "", properties);
Configuration configuration = parser.getConfiguration();
configuration.setEnvironment(environment);
configuration = parser.parse();
configuration.setDefaultStatementTimeout(jdbcStatementTimeout);
if (isJdbcBatchProcessing()) {
configuration.setDefaultExecutorType(ExecutorType.BATCH);
}
sqlSessionFactory = new DefaultSqlSessionFactory(configuration);
if (isUseSharedSqlSessionFactory) {
cachedSqlSessionFactory = sqlSessionFactory;
}
} catch (Exception e) {
throw new ProcessEngineException("Error while building ibatis SqlSessionFactory: " + e.getMessage(), e);
} finally {
IoUtil.closeSilently(inputStream);
}
}
}
}
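/**
 * Injects the database-specific SQL fragments (paging, ordering, bit operations, date functions,
 * constants, etc.) from {@link DbSqlSessionFactory} as MyBatis placeholder properties so that the
 * mapping files can be parameterized per database type.
 */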
public static void initSqlSessionFactoryProperties(Properties properties, String databaseTablePrefix, String databaseType) {
if (databaseType != null) {
properties.put("limitBefore", DbSqlSessionFactory.databaseSpecificLimitBeforeStatements.get(databaseType));
properties.put("limitAfter", DbSqlSessionFactory.databaseSpecificLimitAfterStatements.get(databaseType));
properties.put("limitBeforeWithoutOffset", DbSqlSessionFactory.databaseSpecificLimitBeforeWithoutOffsetStatements.get(databaseType));
properties.put("limitAfterWithoutOffset", DbSqlSessionFactory.databaseSpecificLimitAfterWithoutOffsetStatements.get(databaseType));
properties.put("optimizeLimitBeforeWithoutOffset", DbSqlSessionFactory.optimizeDatabaseSpecificLimitBeforeWithoutOffsetStatements.get(databaseType));
properties.put("optimizeLimitAfterWithoutOffset", DbSqlSessionFactory.optimizeDatabaseSpecificLimitAfterWithoutOffsetStatements.get(databaseType));
properties.put("innerLimitAfter", DbSqlSessionFactory.databaseSpecificInnerLimitAfterStatements.get(databaseType));
properties.put("limitBetween", DbSqlSessionFactory.databaseSpecificLimitBetweenStatements.get(databaseType));
properties.put("limitBetweenFilter", DbSqlSessionFactory.databaseSpecificLimitBetweenFilterStatements.get(databaseType));
properties.put("limitBetweenAcquisition", DbSqlSessionFactory.databaseSpecificLimitBetweenAcquisitionStatements.get(databaseType));
properties.put("orderBy", DbSqlSessionFactory.databaseSpecificOrderByStatements.get(databaseType));
properties.put("limitBeforeNativeQuery", DbSqlSessionFactory.databaseSpecificLimitBeforeNativeQueryStatements.get(databaseType));
properties.put("distinct", DbSqlSessionFactory.databaseSpecificDistinct.get(databaseType));
properties.put("countDistinctBeforeStart", DbSqlSessionFactory.databaseSpecificCountDistinctBeforeStart.get(databaseType));
properties.put("countDistinctBeforeEnd", DbSqlSessionFactory.databaseSpecificCountDistinctBeforeEnd.get(databaseType));
properties.put("countDistinctAfterEnd", DbSqlSessionFactory.databaseSpecificCountDistinctAfterEnd.get(databaseType));
properties.put("escapeChar", DbSqlSessionFactory.databaseSpecificEscapeChar.get(databaseType));
properties.put("bitand1", DbSqlSessionFactory.databaseSpecificBitAnd1.get(databaseType));
properties.put("bitand2", DbSqlSessionFactory.databaseSpecificBitAnd2.get(databaseType));
properties.put("bitand3", DbSqlSessionFactory.databaseSpecificBitAnd3.get(databaseType));
properties.put("datepart1", DbSqlSessionFactory.databaseSpecificDatepart1.get(databaseType));
properties.put("datepart2", DbSqlSessionFactory.databaseSpecificDatepart2.get(databaseType));
properties.put("datepart3", DbSqlSessionFactory.databaseSpecificDatepart3.get(databaseType));
properties.put("trueConstant", DbSqlSessionFactory.databaseSpecificTrueConstant.get(databaseType));
properties.put("falseConstant", DbSqlSessionFactory.databaseSpecificFalseConstant.get(databaseType));
properties.put("dbSpecificDummyTable", DbSqlSessionFactory.databaseSpecificDummyTable.get(databaseType));
properties.put("dbSpecificIfNullFunction", DbSqlSessionFactory.databaseSpecificIfNull.get(databaseType));
properties.put("dayComparator", DbSqlSessionFactory.databaseSpecificDaysComparator.get(databaseType));
properties.put("collationForCaseSensitivity", DbSqlSessionFactory.databaseSpecificCollationForCaseSensitivity.get(databaseType));
Map<String, String> constants = DbSqlSessionFactory.dbSpecificConstants.get(databaseType);
for (Entry<String, String> entry : constants.entrySet()) {
properties.put(entry.getKey(), entry.getValue());
}
}
}
protected InputStream getMyBatisXmlConfigurationSteam() {
return ReflectUtil.getResourceAsStream(DEFAULT_MYBATIS_MAPPING_FILE);
}
// session factories ////////////////////////////////////////////////////////
protected void initIdentityProviderSessionFactory() {
if (identityProviderSessionFactory == null) {
identityProviderSessionFactory = new GenericManagerFactory(DbIdentityServiceProvider.class);
}
}
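// Registers the persistence session factories: the entity manager factory, the generic manager
// factories for all entity managers, and the identity provider session factory (also registered as
// WritableIdentityProvider if supported). Custom session factories are added last and may override
// the defaults for the same session type.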
protected void initSessionFactories() {
if (sessionFactories == null) {
sessionFactories = new HashMap<>();
initPersistenceProviders();
addSessionFactory(new DbEntityManagerFactory(idGenerator));
addSessionFactory(new GenericManagerFactory(AttachmentManager.class));
addSessionFactory(new GenericManagerFactory(CommentManager.class));
addSessionFactory(new GenericManagerFactory(DeploymentManager.class));
addSessionFactory(new GenericManagerFactory(ExecutionManager.class));
addSessionFactory(new GenericManagerFactory(HistoricActivityInstanceManager.class));
addSessionFactory(new GenericManagerFactory(HistoricCaseActivityInstanceManager.class));
addSessionFactory(new GenericManagerFactory(HistoricStatisticsManager.class));
addSessionFactory(new GenericManagerFactory(HistoricDetailManager.class));
addSessionFactory(new GenericManagerFactory(HistoricProcessInstanceManager.class));
addSessionFactory(new GenericManagerFactory(HistoricCaseInstanceManager.class));
addSessionFactory(new GenericManagerFactory(UserOperationLogManager.class));
addSessionFactory(new GenericManagerFactory(HistoricTaskInstanceManager.class));
addSessionFactory(new GenericManagerFactory(HistoricVariableInstanceManager.class));
addSessionFactory(new GenericManagerFactory(HistoricIncidentManager.class));
addSessionFactory(new GenericManagerFactory(HistoricIdentityLinkLogManager.class));
addSessionFactory(new GenericManagerFactory(HistoricJobLogManager.class));
addSessionFactory(new GenericManagerFactory(HistoricExternalTaskLogManager.class));
addSessionFactory(new GenericManagerFactory(IdentityInfoManager.class));
addSessionFactory(new GenericManagerFactory(IdentityLinkManager.class));
addSessionFactory(new GenericManagerFactory(JobManager.class));
addSessionFactory(new GenericManagerFactory(JobDefinitionManager.class));
addSessionFactory(new GenericManagerFactory(ProcessDefinitionManager.class));
addSessionFactory(new GenericManagerFactory(PropertyManager.class));
addSessionFactory(new GenericManagerFactory(ResourceManager.class));
addSessionFactory(new GenericManagerFactory(ByteArrayManager.class));
addSessionFactory(new GenericManagerFactory(TableDataManager.class));
addSessionFactory(new GenericManagerFactory(TaskManager.class));
addSessionFactory(new GenericManagerFactory(TaskReportManager.class));
addSessionFactory(new GenericManagerFactory(VariableInstanceManager.class));
addSessionFactory(new GenericManagerFactory(EventSubscriptionManager.class));
addSessionFactory(new GenericManagerFactory(StatisticsManager.class));
addSessionFactory(new GenericManagerFactory(IncidentManager.class));
addSessionFactory(new GenericManagerFactory(AuthorizationManager.class));
addSessionFactory(new GenericManagerFactory(FilterManager.class));
addSessionFactory(new GenericManagerFactory(MeterLogManager.class));
addSessionFactory(new GenericManagerFactory(ExternalTaskManager.class));
addSessionFactory(new GenericManagerFactory(ReportManager.class));
addSessionFactory(new GenericManagerFactory(BatchManager.class));
addSessionFactory(new GenericManagerFactory(HistoricBatchManager.class));
addSessionFactory(new GenericManagerFactory(TenantManager.class));
addSessionFactory(new GenericManagerFactory(SchemaLogManager.class));
addSessionFactory(new GenericManagerFactory(CaseDefinitionManager.class));
addSessionFactory(new GenericManagerFactory(CaseExecutionManager.class));
addSessionFactory(new GenericManagerFactory(CaseSentryPartManager.class));
addSessionFactory(new GenericManagerFactory(DecisionDefinitionManager.class));
addSessionFactory(new GenericManagerFactory(DecisionRequirementsDefinitionManager.class));
addSessionFactory(new GenericManagerFactory(HistoricDecisionInstanceManager.class));
addSessionFactory(new GenericManagerFactory(OptimizeManager.class));
sessionFactories.put(ReadOnlyIdentityProvider.class, identityProviderSessionFactory);
// check whether identityProviderSessionFactory implements WritableIdentityProvider
Class<?> identityProviderType = identityProviderSessionFactory.getSessionType();
if (WritableIdentityProvider.class.isAssignableFrom(identityProviderType)) {
sessionFactories.put(WritableIdentityProvider.class, identityProviderSessionFactory);
}
}
if (customSessionFactories != null) {
for (SessionFactory sessionFactory : customSessionFactories) {
addSessionFactory(sessionFactory);
}
}
}
protected void initPersistenceProviders() {
ensurePrefixAndSchemaFitToegether(databaseTablePrefix, databaseSchema);
dbSqlSessionFactory = new DbSqlSessionFactory(jdbcBatchProcessing);
dbSqlSessionFactory.setDatabaseType(databaseType);
dbSqlSessionFactory.setIdGenerator(idGenerator);
dbSqlSessionFactory.setSqlSessionFactory(sqlSessionFactory);
dbSqlSessionFactory.setDbIdentityUsed(isDbIdentityUsed);
dbSqlSessionFactory.setDbHistoryUsed(isDbHistoryUsed);
dbSqlSessionFactory.setCmmnEnabled(cmmnEnabled);
dbSqlSessionFactory.setDmnEnabled(dmnEnabled);
dbSqlSessionFactory.setDatabaseTablePrefix(databaseTablePrefix);
// hack for the case when the schema is defined via the databaseTablePrefix parameter and not via the databaseSchema parameter
if (databaseTablePrefix != null && databaseSchema == null && databaseTablePrefix.contains(".")) {
databaseSchema = databaseTablePrefix.split("\\.")[0];
}
dbSqlSessionFactory.setDatabaseSchema(databaseSchema);
addSessionFactory(dbSqlSessionFactory);
addSessionFactory(new DbSqlPersistenceProviderFactory());
}
protected void initMigration() {
initMigrationInstructionValidators();
initMigrationActivityMatcher();
initMigrationInstructionGenerator();
initMigratingActivityInstanceValidators();
initMigratingTransitionInstanceValidators();
initMigratingCompensationInstanceValidators();
}
protected void initMigrationActivityMatcher() {
if (migrationActivityMatcher == null) {
migrationActivityMatcher = new DefaultMigrationActivityMatcher();
}
}
protected void initMigrationInstructionGenerator() {
if (migrationInstructionGenerator == null) {
migrationInstructionGenerator = new DefaultMigrationInstructionGenerator(migrationActivityMatcher);
}
List<MigrationActivityValidator> migrationActivityValidators = new ArrayList<>();
if (customPreMigrationActivityValidators != null) {
migrationActivityValidators.addAll(customPreMigrationActivityValidators);
}
migrationActivityValidators.addAll(getDefaultMigrationActivityValidators());
if (customPostMigrationActivityValidators != null) {
migrationActivityValidators.addAll(customPostMigrationActivityValidators);
}
migrationInstructionGenerator = migrationInstructionGenerator
.migrationActivityValidators(migrationActivityValidators)
.migrationInstructionValidators(migrationInstructionValidators);
}
protected void initMigrationInstructionValidators() {
if (migrationInstructionValidators == null) {
migrationInstructionValidators = new ArrayList<>();
if (customPreMigrationInstructionValidators != null) {
migrationInstructionValidators.addAll(customPreMigrationInstructionValidators);
}
migrationInstructionValidators.addAll(getDefaultMigrationInstructionValidators());
if (customPostMigrationInstructionValidators != null) {
migrationInstructionValidators.addAll(customPostMigrationInstructionValidators);
}
}
}
protected void initMigratingActivityInstanceValidators() {
if (migratingActivityInstanceValidators == null) {
migratingActivityInstanceValidators = new ArrayList<>();
if (customPreMigratingActivityInstanceValidators != null) {
migratingActivityInstanceValidators.addAll(customPreMigratingActivityInstanceValidators);
}
migratingActivityInstanceValidators.addAll(getDefaultMigratingActivityInstanceValidators());
if (customPostMigratingActivityInstanceValidators != null) {
migratingActivityInstanceValidators.addAll(customPostMigratingActivityInstanceValidators);
}
}
}
protected void initMigratingTransitionInstanceValidators() {
if (migratingTransitionInstanceValidators == null) {
migratingTransitionInstanceValidators = new ArrayList<>();
migratingTransitionInstanceValidators.addAll(getDefaultMigratingTransitionInstanceValidators());
}
}
protected void initMigratingCompensationInstanceValidators() {
if (migratingCompensationInstanceValidators == null) {
migratingCompensationInstanceValidators = new ArrayList<>();
migratingCompensationInstanceValidators.add(new NoUnmappedLeafInstanceValidator());
migratingCompensationInstanceValidators.add(new NoUnmappedCompensationStartEventValidator());
}
}
/**
* When a schema is configured, the table prefix must also be set and must start with the schema
* followed by a dot (e.g. schema "ENGINE" requires a prefix starting with "ENGINE.").
*/
protected void ensurePrefixAndSchemaFitToegether(String prefix, String schema) {
if (schema == null) {
return;
} else if (prefix == null || !prefix.startsWith(schema + ".")) {
throw new ProcessEngineException("When setting a schema the prefix has to be schema + '.'. Received schema: " + schema + " prefix: " + prefix);
}
}
protected void addSessionFactory(SessionFactory sessionFactory) {
sessionFactories.put(sessionFactory.getSessionType(), sessionFactory);
}
// deployers ////////////////////////////////////////////////////////////////
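// Assembles the deployer list (custom pre-deployers, default deployers, custom post-deployers)
// and creates the deployment cache with the same deployer set.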
protected void initDeployers() {
if (this.deployers == null) {
this.deployers = new ArrayList<>();
if (customPreDeployers != null) {
this.deployers.addAll(customPreDeployers);
}
this.deployers.addAll(getDefaultDeployers());
if (customPostDeployers != null) {
this.deployers.addAll(customPostDeployers);
}
}
if (deploymentCache == null) {
List<Deployer> deployers = new ArrayList<>();
if (customPreDeployers != null) {
deployers.addAll(customPreDeployers);
}
deployers.addAll(getDefaultDeployers());
if (customPostDeployers != null) {
deployers.addAll(customPostDeployers);
}
initCacheFactory();
deploymentCache = new DeploymentCache(cacheFactory, cacheCapacity);
deploymentCache.setDeployers(deployers);
}
}
protected Collection<? extends Deployer> getDefaultDeployers() {
List<Deployer> defaultDeployers = new ArrayList<>();
BpmnDeployer bpmnDeployer = getBpmnDeployer();
defaultDeployers.add(bpmnDeployer);
if (isCmmnEnabled()) {
CmmnDeployer cmmnDeployer = getCmmnDeployer();
defaultDeployers.add(cmmnDeployer);
}
if (isDmnEnabled()) {
DecisionRequirementsDefinitionDeployer decisionRequirementsDefinitionDeployer = getDecisionRequirementsDefinitionDeployer();
DecisionDefinitionDeployer decisionDefinitionDeployer = getDecisionDefinitionDeployer();
// the DecisionRequirementsDefinitionDeployer must be registered before the DecisionDefinitionDeployer
defaultDeployers.add(decisionRequirementsDefinitionDeployer);
defaultDeployers.add(decisionDefinitionDeployer);
}
return defaultDeployers;
}
protected BpmnDeployer getBpmnDeployer() {
BpmnDeployer bpmnDeployer = new BpmnDeployer();
bpmnDeployer.setExpressionManager(expressionManager);
bpmnDeployer.setIdGenerator(idGenerator);
if (bpmnParseFactory == null) {
bpmnParseFactory = new DefaultBpmnParseFactory();
}
BpmnParser bpmnParser = new BpmnParser(expressionManager, bpmnParseFactory);
if (preParseListeners != null) {
bpmnParser.getParseListeners().addAll(preParseListeners);
}
bpmnParser.getParseListeners().addAll(getDefaultBPMNParseListeners());
if (postParseListeners != null) {
bpmnParser.getParseListeners().addAll(postParseListeners);
}
bpmnDeployer.setBpmnParser(bpmnParser);
return bpmnDeployer;
}
protected List<BpmnParseListener> getDefaultBPMNParseListeners() {
List<BpmnParseListener> defaultListeners = new ArrayList<>();
if (!HistoryLevel.HISTORY_LEVEL_NONE.equals(historyLevel)) {
defaultListeners.add(new HistoryParseListener(historyEventProducer));
}
if (isMetricsEnabled) {
defaultListeners.add(new MetricsBpmnParseListener());
}
return defaultListeners;
}
protected CmmnDeployer getCmmnDeployer() {
CmmnDeployer cmmnDeployer = new CmmnDeployer();
cmmnDeployer.setIdGenerator(idGenerator);
if (cmmnTransformFactory == null) {
cmmnTransformFactory = new DefaultCmmnTransformFactory();
}
if (cmmnElementHandlerRegistry == null) {
cmmnElementHandlerRegistry = new DefaultCmmnElementHandlerRegistry();
}
CmmnTransformer cmmnTransformer = new CmmnTransformer(expressionManager, cmmnElementHandlerRegistry, cmmnTransformFactory);
if (customPreCmmnTransformListeners != null) {
cmmnTransformer.getTransformListeners().addAll(customPreCmmnTransformListeners);
}
cmmnTransformer.getTransformListeners().addAll(getDefaultCmmnTransformListeners());
if (customPostCmmnTransformListeners != null) {
cmmnTransformer.getTransformListeners().addAll(customPostCmmnTransformListeners);
}
cmmnDeployer.setTransformer(cmmnTransformer);
return cmmnDeployer;
}
protected List<CmmnTransformListener> getDefaultCmmnTransformListeners() {
List<CmmnTransformListener> defaultListener = new ArrayList<>();
if (!HistoryLevel.HISTORY_LEVEL_NONE.equals(historyLevel)) {
defaultListener.add(new CmmnHistoryTransformListener(cmmnHistoryEventProducer));
}
if (isMetricsEnabled) {
defaultListener.add(new MetricsCmmnTransformListener());
}
return defaultListener;
}
protected DecisionDefinitionDeployer getDecisionDefinitionDeployer() {
DecisionDefinitionDeployer decisionDefinitionDeployer = new DecisionDefinitionDeployer();
decisionDefinitionDeployer.setIdGenerator(idGenerator);
decisionDefinitionDeployer.setTransformer(dmnEngineConfiguration.getTransformer());
return decisionDefinitionDeployer;
}
protected DecisionRequirementsDefinitionDeployer getDecisionRequirementsDefinitionDeployer() {
DecisionRequirementsDefinitionDeployer drdDeployer = new DecisionRequirementsDefinitionDeployer();
drdDeployer.setIdGenerator(idGenerator);
drdDeployer.setTransformer(dmnEngineConfiguration.getTransformer());
return drdDeployer;
}
public DmnEngine getDmnEngine() {
return dmnEngine;
}
public void setDmnEngine(DmnEngine dmnEngine) {
this.dmnEngine = dmnEngine;
}
public DefaultDmnEngineConfiguration getDmnEngineConfiguration() {
return dmnEngineConfiguration;
}
public void setDmnEngineConfiguration(DefaultDmnEngineConfiguration dmnEngineConfiguration) {
this.dmnEngineConfiguration = dmnEngineConfiguration;
}
// job executor /////////////////////////////////////////////////////////////
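// Registers the built-in job handlers (timers, async continuation, process events,
// suspension/activation, batch seed/monitor, history cleanup), the batch job handlers and any
// custom job handlers, and configures the job executor's auto-activation and rejected-jobs handling.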
protected void initJobExecutor() {
if (jobExecutor == null) {
jobExecutor = new DefaultJobExecutor();
}
jobHandlers = new HashMap<>();
TimerExecuteNestedActivityJobHandler timerExecuteNestedActivityJobHandler = new TimerExecuteNestedActivityJobHandler();
jobHandlers.put(timerExecuteNestedActivityJobHandler.getType(), timerExecuteNestedActivityJobHandler);
TimerCatchIntermediateEventJobHandler timerCatchIntermediateEvent = new TimerCatchIntermediateEventJobHandler();
jobHandlers.put(timerCatchIntermediateEvent.getType(), timerCatchIntermediateEvent);
TimerStartEventJobHandler timerStartEvent = new TimerStartEventJobHandler();
jobHandlers.put(timerStartEvent.getType(), timerStartEvent);
TimerStartEventSubprocessJobHandler timerStartEventSubprocess = new TimerStartEventSubprocessJobHandler();
jobHandlers.put(timerStartEventSubprocess.getType(), timerStartEventSubprocess);
AsyncContinuationJobHandler asyncContinuationJobHandler = new AsyncContinuationJobHandler();
jobHandlers.put(asyncContinuationJobHandler.getType(), asyncContinuationJobHandler);
ProcessEventJobHandler processEventJobHandler = new ProcessEventJobHandler();
jobHandlers.put(processEventJobHandler.getType(), processEventJobHandler);
TimerSuspendProcessDefinitionHandler suspendProcessDefinitionHandler = new TimerSuspendProcessDefinitionHandler();
jobHandlers.put(suspendProcessDefinitionHandler.getType(), suspendProcessDefinitionHandler);
TimerActivateProcessDefinitionHandler activateProcessDefinitionHandler = new TimerActivateProcessDefinitionHandler();
jobHandlers.put(activateProcessDefinitionHandler.getType(), activateProcessDefinitionHandler);
TimerSuspendJobDefinitionHandler suspendJobDefinitionHandler = new TimerSuspendJobDefinitionHandler();
jobHandlers.put(suspendJobDefinitionHandler.getType(), suspendJobDefinitionHandler);
TimerActivateJobDefinitionHandler activateJobDefinitionHandler = new TimerActivateJobDefinitionHandler();
jobHandlers.put(activateJobDefinitionHandler.getType(), activateJobDefinitionHandler);
TimerTaskListenerJobHandler taskListenerJobHandler = new TimerTaskListenerJobHandler();
jobHandlers.put(taskListenerJobHandler.getType(), taskListenerJobHandler);
BatchSeedJobHandler batchSeedJobHandler = new BatchSeedJobHandler();
jobHandlers.put(batchSeedJobHandler.getType(), batchSeedJobHandler);
BatchMonitorJobHandler batchMonitorJobHandler = new BatchMonitorJobHandler();
jobHandlers.put(batchMonitorJobHandler.getType(), batchMonitorJobHandler);
HistoryCleanupJobHandler historyCleanupJobHandler = new HistoryCleanupJobHandler();
jobHandlers.put(historyCleanupJobHandler.getType(), historyCleanupJobHandler);
for (JobHandler batchHandler : batchHandlers.values()) {
jobHandlers.put(batchHandler.getType(), batchHandler);
}
// if we have custom job handlers, register them
if (getCustomJobHandlers() != null) {
for (JobHandler customJobHandler : getCustomJobHandlers()) {
jobHandlers.put(customJobHandler.getType(), customJobHandler);
}
}
jobExecutor.setAutoActivate(jobExecutorActivate);
if (jobExecutor.getRejectedJobsHandler() == null) {
if (customRejectedJobsHandler != null) {
jobExecutor.setRejectedJobsHandler(customRejectedJobsHandler);
} else {
jobExecutor.setRejectedJobsHandler(new NotifyAcquisitionRejectedJobsHandler());
}
}
}
protected void initJobProvider() {
if (producePrioritizedJobs && jobPriorityProvider == null) {
jobPriorityProvider = new DefaultJobPriorityProvider();
}
}
//external task /////////////////////////////////////////////////////////////
protected void initExternalTaskPriorityProvider() {
if (producePrioritizedExternalTasks && externalTaskPriorityProvider == null) {
externalTaskPriorityProvider = new DefaultExternalTaskPriorityProvider();
}
}
// history //////////////////////////////////////////////////////////////////
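// Resolves the configured history level name against the built-in and custom history levels.
// The deprecated value "variable" is mapped to the activity level; "auto" is the only value for
// which a null history level is allowed at this point.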
public void initHistoryLevel() {
if (historyLevel != null) {
setHistory(historyLevel.getName());
}
if (historyLevels == null) {
historyLevels = new ArrayList<>();
historyLevels.add(HistoryLevel.HISTORY_LEVEL_NONE);
historyLevels.add(HistoryLevel.HISTORY_LEVEL_ACTIVITY);
historyLevels.add(HistoryLevel.HISTORY_LEVEL_AUDIT);
historyLevels.add(HistoryLevel.HISTORY_LEVEL_FULL);
}
if (customHistoryLevels != null) {
historyLevels.addAll(customHistoryLevels);
}
if (HISTORY_VARIABLE.equalsIgnoreCase(history)) {
historyLevel = HistoryLevel.HISTORY_LEVEL_ACTIVITY;
LOG.usingDeprecatedHistoryLevelVariable();
} else {
for (HistoryLevel historyLevel : historyLevels) {
if (historyLevel.getName().equalsIgnoreCase(history)) {
this.historyLevel = historyLevel;
}
}
}
// a null history level is only allowed in case of "auto"
if (historyLevel == null && !ProcessEngineConfiguration.HISTORY_AUTO.equalsIgnoreCase(history)) {
throw new ProcessEngineException("invalid history level: " + history);
}
}
// id generator /////////////////////////////////////////////////////////////
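// Creates the default DbIdGenerator if no id generator is configured. If a dedicated data source
// (or JNDI name) is configured for id generation, a separate standalone engine configuration is
// initialized for it; otherwise id blocks are acquired via the requires-new command executor.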
protected void initIdGenerator() {
if (idGenerator == null) {
CommandExecutor idGeneratorCommandExecutor = null;
if (idGeneratorDataSource != null) {
ProcessEngineConfigurationImpl processEngineConfiguration = new StandaloneProcessEngineConfiguration();
processEngineConfiguration.setDataSource(idGeneratorDataSource);
processEngineConfiguration.setDatabaseSchemaUpdate(DB_SCHEMA_UPDATE_FALSE);
processEngineConfiguration.init();
idGeneratorCommandExecutor = processEngineConfiguration.getCommandExecutorTxRequiresNew();
} else if (idGeneratorDataSourceJndiName != null) {
ProcessEngineConfigurationImpl processEngineConfiguration = new StandaloneProcessEngineConfiguration();
processEngineConfiguration.setDataSourceJndiName(idGeneratorDataSourceJndiName);
processEngineConfiguration.setDatabaseSchemaUpdate(DB_SCHEMA_UPDATE_FALSE);
processEngineConfiguration.init();
idGeneratorCommandExecutor = processEngineConfiguration.getCommandExecutorTxRequiresNew();
} else {
idGeneratorCommandExecutor = commandExecutorTxRequiresNew;
}
DbIdGenerator dbIdGenerator = new DbIdGenerator();
dbIdGenerator.setIdBlockSize(idBlockSize);
dbIdGenerator.setCommandExecutor(idGeneratorCommandExecutor);
idGenerator = dbIdGenerator;
}
}
// OTHER ////////////////////////////////////////////////////////////////////
protected void initCommandContextFactory() {
if (commandContextFactory == null) {
commandContextFactory = new CommandContextFactory();
commandContextFactory.setProcessEngineConfiguration(this);
}
}
protected void initTransactionContextFactory() {
if (transactionContextFactory == null) {
transactionContextFactory = new StandaloneTransactionContextFactory();
}
}
protected void initValueTypeResolver() {
if (valueTypeResolver == null) {
valueTypeResolver = new ValueTypeResolverImpl();
}
}
protected void initDefaultCharset() {
if (defaultCharset == null) {
if (defaultCharsetName == null) {
defaultCharsetName = "UTF-8";
}
defaultCharset = Charset.forName(defaultCharsetName);
}
}
protected void initMetrics() {
if (isMetricsEnabled) {
if (metricsReporterIdProvider == null) {
metricsReporterIdProvider = new SimpleIpBasedProvider();
}
if (metricsRegistry == null) {
metricsRegistry = new MetricsRegistry();
}
initDefaultMetrics(metricsRegistry);
if (dbMetricsReporter == null) {
dbMetricsReporter = new DbMetricsReporter(metricsRegistry, commandExecutorTxRequired);
}
}
}
protected void initDefaultMetrics(MetricsRegistry metricsRegistry) {
metricsRegistry.createMeter(Metrics.ACTIVTY_INSTANCE_START);
metricsRegistry.createMeter(Metrics.ACTIVTY_INSTANCE_END);
metricsRegistry.createMeter(Metrics.JOB_ACQUISITION_ATTEMPT);
metricsRegistry.createMeter(Metrics.JOB_ACQUIRED_SUCCESS);
metricsRegistry.createMeter(Metrics.JOB_ACQUIRED_FAILURE);
metricsRegistry.createMeter(Metrics.JOB_SUCCESSFUL);
metricsRegistry.createMeter(Metrics.JOB_FAILED);
metricsRegistry.createMeter(Metrics.JOB_LOCKED_EXCLUSIVE);
metricsRegistry.createMeter(Metrics.JOB_EXECUTION_REJECTED);
metricsRegistry.createMeter(Metrics.EXECUTED_DECISION_ELEMENTS);
}
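// Registers the variable serializers in order: custom pre-serializers, the built-in serializers,
// then custom post-serializers.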
protected void initSerialization() {
if (variableSerializers == null) {
variableSerializers = new DefaultVariableSerializers();
if (customPreVariableSerializers != null) {
for (TypedValueSerializer<?> customVariableType : customPreVariableSerializers) {
variableSerializers.addSerializer(customVariableType);
}
}
// register built-in serializers
variableSerializers.addSerializer(new NullValueSerializer());
variableSerializers.addSerializer(new StringValueSerializer());
variableSerializers.addSerializer(new BooleanValueSerializer());
variableSerializers.addSerializer(new ShortValueSerializer());
variableSerializers.addSerializer(new IntegerValueSerializer());
variableSerializers.addSerializer(new LongValueSerlializer());
variableSerializers.addSerializer(new DateValueSerializer());
variableSerializers.addSerializer(new DoubleValueSerializer());
variableSerializers.addSerializer(new ByteArrayValueSerializer());
variableSerializers.addSerializer(new JavaObjectSerializer());
variableSerializers.addSerializer(new FileValueSerializer());
if (customPostVariableSerializers != null) {
for (TypedValueSerializer<?> customVariableType : customPostVariableSerializers) {
variableSerializers.addSerializer(customVariableType);
}
}
}
}
protected void initFormEngines() {
if (formEngines == null) {
formEngines = new HashMap<>();
// html form engine = default form engine
FormEngine defaultFormEngine = new HtmlFormEngine();
formEngines.put(null, defaultFormEngine); // default form engine is looked up with null
formEngines.put(defaultFormEngine.getName(), defaultFormEngine);
FormEngine juelFormEngine = new JuelFormEngine();
formEngines.put(juelFormEngine.getName(), juelFormEngine);
}
if (customFormEngines != null) {
for (FormEngine formEngine : customFormEngines) {
formEngines.put(formEngine.getName(), formEngine);
}
}
}
protected void initFormTypes() {
if (formTypes == null) {
formTypes = new FormTypes();
formTypes.addFormType(new StringFormType());
formTypes.addFormType(new LongFormType());
formTypes.addFormType(new DateFormType("dd/MM/yyyy"));
formTypes.addFormType(new BooleanFormType());
}
if (customFormTypes != null) {
for (AbstractFormFieldType customFormType : customFormTypes) {
formTypes.addFormType(customFormType);
}
}
}
protected void initFormFieldValidators() {
if (formValidators == null) {
formValidators = new FormValidators();
formValidators.addValidator("min", MinValidator.class);
formValidators.addValidator("max", MaxValidator.class);
formValidators.addValidator("minlength", MinLengthValidator.class);
formValidators.addValidator("maxlength", MaxLengthValidator.class);
formValidators.addValidator("required", RequiredValidator.class);
formValidators.addValidator("readonly", ReadOnlyValidator.class);
}
if (customFormFieldValidators != null) {
for (Entry<String, Class<? extends FormFieldValidator>> validator : customFormFieldValidators.entrySet()) {
formValidators.addValidator(validator.getKey(), validator.getValue());
}
}
}
protected void initScripting() {
if (resolverFactories == null) {
resolverFactories = new ArrayList<>();
resolverFactories.add(new MocksResolverFactory());
resolverFactories.add(new VariableScopeResolverFactory());
resolverFactories.add(new BeansResolverFactory());
}
if (scriptingEngines == null) {
scriptingEngines = new ScriptingEngines(new ScriptBindingsFactory(resolverFactories));
scriptingEngines.setEnableScriptEngineCaching(enableScriptEngineCaching);
}
if (scriptFactory == null) {
scriptFactory = new ScriptFactory();
}
if (scriptEnvResolvers == null) {
scriptEnvResolvers = new ArrayList<>();
}
if (scriptingEnvironment == null) {
scriptingEnvironment = new ScriptingEnvironment(scriptFactory, scriptEnvResolvers, scriptingEngines);
}
}
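// Builds the DMN engine from the configured (or default) DMN engine configuration, wiring in the
// DMN history event producer, the scripting engines and the expression manager. If a DMN engine
// is already provided, only its configuration is adopted.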
protected void initDmnEngine() {
if (dmnEngine == null) {
if (dmnEngineConfiguration == null) {
dmnEngineConfiguration = (DefaultDmnEngineConfiguration) DmnEngineConfiguration.createDefaultDmnEngineConfiguration();
}
dmnEngineConfiguration = new DmnEngineConfigurationBuilder(dmnEngineConfiguration)
.dmnHistoryEventProducer(dmnHistoryEventProducer)
.scriptEngineResolver(scriptingEngines)
.expressionManager(expressionManager)
.build();
dmnEngine = dmnEngineConfiguration.buildEngine();
} else if (dmnEngineConfiguration == null) {
dmnEngineConfiguration = (DefaultDmnEngineConfiguration) dmnEngine.getConfiguration();
}
}
protected void initExpressionManager() {
if (expressionManager == null) {
expressionManager = new ExpressionManager(beans);
}
// add function mapper for command context (e.g. currentUser(), currentUserGroups())
expressionManager.addFunctionMapper(new CommandContextFunctionMapper());
// add function mapper for date time (e.g. now(), dateTime())
expressionManager.addFunctionMapper(new DateTimeFunctionMapper());
}
protected void initBusinessCalendarManager() {
if (businessCalendarManager == null) {
MapBusinessCalendarManager mapBusinessCalendarManager = new MapBusinessCalendarManager();
mapBusinessCalendarManager.addBusinessCalendar(DurationBusinessCalendar.NAME, new DurationBusinessCalendar());
mapBusinessCalendarManager.addBusinessCalendar(DueDateBusinessCalendar.NAME, new DueDateBusinessCalendar());
mapBusinessCalendarManager.addBusinessCalendar(CycleBusinessCalendar.NAME, new CycleBusinessCalendar());
businessCalendarManager = mapBusinessCalendarManager;
}
}
protected void initDelegateInterceptor() {
if (delegateInterceptor == null) {
delegateInterceptor = new DefaultDelegateInterceptor();
}
}
protected void initEventHandlers() {
if (eventHandlers == null) {
eventHandlers = new HashMap<>();
SignalEventHandler signalEventHandler = new SignalEventHandler();
eventHandlers.put(signalEventHandler.getEventHandlerType(), signalEventHandler);
CompensationEventHandler compensationEventHandler = new CompensationEventHandler();
eventHandlers.put(compensationEventHandler.getEventHandlerType(), compensationEventHandler);
EventHandler messageEventHandler = new EventHandlerImpl(EventType.MESSAGE);
eventHandlers.put(messageEventHandler.getEventHandlerType(), messageEventHandler);
EventHandler conditionalEventHandler = new ConditionalEventHandler();
eventHandlers.put(conditionalEventHandler.getEventHandlerType(), conditionalEventHandler);
}
if (customEventHandlers != null) {
for (EventHandler eventHandler : customEventHandlers) {
eventHandlers.put(eventHandler.getEventHandlerType(), eventHandler);
}
}
}
protected void initCommandCheckers() {
if (commandCheckers == null) {
commandCheckers = new ArrayList<>();
// add the default command checkers
commandCheckers.add(new TenantCommandChecker());
commandCheckers.add(new AuthorizationCommandChecker());
}
}
// JPA //////////////////////////////////////////////////////////////////////
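// If a JPA persistence unit is configured, creates the EntityManagerFactory and registers the
// EntityManagerSession factory plus the JPA variable serializer (placed right after the
// byte-array serializer when possible).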
protected void initJpa() {
if (jpaPersistenceUnitName != null) {
jpaEntityManagerFactory = JpaHelper.createEntityManagerFactory(jpaPersistenceUnitName);
}
if (jpaEntityManagerFactory != null) {
sessionFactories.put(EntityManagerSession.class, new EntityManagerSessionFactory(jpaEntityManagerFactory, jpaHandleTransaction, jpaCloseEntityManager));
JPAVariableSerializer jpaType = (JPAVariableSerializer) variableSerializers.getSerializerByName(JPAVariableSerializer.NAME);
// add the JPA variable serializer if it is not registered yet
if (jpaType == null) {
// try to add it right after the byte-array serializer, if available
int serializableIndex = variableSerializers.getSerializerIndexByName(ValueType.BYTES.getName());
if (serializableIndex > -1) {
variableSerializers.addSerializer(new JPAVariableSerializer(), serializableIndex);
} else {
variableSerializers.addSerializer(new JPAVariableSerializer());
}
}
}
}
protected void initBeans() {
if (beans == null) {
beans = new HashMap<>();
}
}
protected void initArtifactFactory() {
if (artifactFactory == null) {
artifactFactory = new DefaultArtifactFactory();
}
}
protected void initProcessApplicationManager() {
if (processApplicationManager == null) {
processApplicationManager = new ProcessApplicationManager();
}
}
// correlation handler //////////////////////////////////////////////////////
protected void initCorrelationHandler() {
if (correlationHandler == null) {
correlationHandler = new DefaultCorrelationHandler();
}
}
// condition handler //////////////////////////////////////////////////////
protected void initConditionHandler() {
if (conditionHandler == null) {
conditionHandler = new DefaultConditionHandler();
}
}
// deployment handler //////////////////////////////////////////////////////
protected void initDeploymentHandlerFactory() {
if (deploymentHandlerFactory == null) {
deploymentHandlerFactory = new DefaultDeploymentHandlerFactory();
}
}
// history handlers /////////////////////////////////////////////////////
protected void initHistoryEventProducer() {
if (historyEventProducer == null) {
historyEventProducer = new CacheAwareHistoryEventProducer();
}
}
protected void initCmmnHistoryEventProducer() {
if (cmmnHistoryEventProducer == null) {
cmmnHistoryEventProducer = new CacheAwareCmmnHistoryEventProducer();
}
}
protected void initDmnHistoryEventProducer() {
if (dmnHistoryEventProducer == null) {
dmnHistoryEventProducer = new DefaultDmnHistoryEventProducer();
}
}
protected void initHistoryEventHandler() {
if (historyEventHandler == null) {
historyEventHandler = new DbHistoryEventHandler();
}
}
// password digest //////////////////////////////////////////////////////////
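// Initializes password hashing defaults: a 16-byte salt generator, SHA-512 as hash digest,
// an empty list of custom password checkers and the password manager combining them.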
protected void initPasswordDigest() {
if(saltGenerator == null) {
saltGenerator = new Default16ByteSaltGenerator();
}
if (passwordEncryptor == null) {
passwordEncryptor = new Sha512HashDigest();
}
if(customPasswordChecker == null) {
customPasswordChecker = Collections.emptyList();
}
if(passwordManager == null) {
passwordManager = new PasswordManager(passwordEncryptor, customPasswordChecker);
}
}
public void initPasswordPolicy() {
if(passwordPolicy == null && enablePasswordPolicy) {
passwordPolicy = new DefaultPasswordPolicyImpl();
}
}
protected void initDeploymentRegistration() {
if (registeredDeployments == null) {
registeredDeployments = new CopyOnWriteArraySet<>();
}
}
// cache factory //////////////////////////////////////////////////////////
protected void initCacheFactory() {
if (cacheFactory == null) {
cacheFactory = new DefaultCacheFactory();
}
}
// resource authorization provider //////////////////////////////////////////
protected void initResourceAuthorizationProvider() {
if (resourceAuthorizationProvider == null) {
resourceAuthorizationProvider = new DefaultAuthorizationProvider();
}
}
protected void initPermissionProvider() {
if (permissionProvider == null) {
permissionProvider = new DefaultPermissionProvider();
}
}
protected void initDefaultUserPermissionForTask() {
if (defaultUserPermissionForTask == null) {
if (Permissions.UPDATE.getName().equals(defaultUserPermissionNameForTask)) {
defaultUserPermissionForTask = Permissions.UPDATE;
} else if (Permissions.TASK_WORK.getName().equals(defaultUserPermissionNameForTask)) {
defaultUserPermissionForTask = Permissions.TASK_WORK;
} else {
throw LOG.invalidConfigDefaultUserPermissionNameForTask(defaultUserPermissionNameForTask, new String[]{Permissions.UPDATE.getName(), Permissions.TASK_WORK.getName()});
}
}
}
protected void initAdminUser() {
if (adminUsers == null) {
adminUsers = new ArrayList<>();
}
}
protected void initAdminGroups() {
if (adminGroups == null) {
adminGroups = new ArrayList<>();
}
if (adminGroups.isEmpty() || !(adminGroups.contains(Groups.CAMUNDA_ADMIN))) {
adminGroups.add(Groups.CAMUNDA_ADMIN);
}
}
// getters and setters //////////////////////////////////////////////////////
@Override
public String getProcessEngineName() {
return processEngineName;
}
public HistoryLevel getHistoryLevel() {
return historyLevel;
}
public void setHistoryLevel(HistoryLevel historyLevel) {
this.historyLevel = historyLevel;
}
public HistoryLevel getDefaultHistoryLevel() {
if (historyLevels != null) {
for (HistoryLevel historyLevel : historyLevels) {
if (HISTORY_DEFAULT != null && HISTORY_DEFAULT.equalsIgnoreCase(historyLevel.getName())) {
return historyLevel;
}
}
}
return null;
}
@Override
public ProcessEngineConfigurationImpl setProcessEngineName(String processEngineName) {
this.processEngineName = processEngineName;
return this;
}
public List<CommandInterceptor> getCustomPreCommandInterceptorsTxRequired() {
return customPreCommandInterceptorsTxRequired;
}
public ProcessEngineConfigurationImpl setCustomPreCommandInterceptorsTxRequired(List<CommandInterceptor> customPreCommandInterceptorsTxRequired) {
this.customPreCommandInterceptorsTxRequired = customPreCommandInterceptorsTxRequired;
return this;
}
public List<CommandInterceptor> getCustomPostCommandInterceptorsTxRequired() {
return customPostCommandInterceptorsTxRequired;
}
public ProcessEngineConfigurationImpl setCustomPostCommandInterceptorsTxRequired(List<CommandInterceptor> customPostCommandInterceptorsTxRequired) {
this.customPostCommandInterceptorsTxRequired = customPostCommandInterceptorsTxRequired;
return this;
}
public List<CommandInterceptor> getCommandInterceptorsTxRequired() {
return commandInterceptorsTxRequired;
}
public ProcessEngineConfigurationImpl setCommandInterceptorsTxRequired(List<CommandInterceptor> commandInterceptorsTxRequired) {
this.commandInterceptorsTxRequired = commandInterceptorsTxRequired;
return this;
}
public CommandExecutor getCommandExecutorTxRequired() {
return commandExecutorTxRequired;
}
public ProcessEngineConfigurationImpl setCommandExecutorTxRequired(CommandExecutor commandExecutorTxRequired) {
this.commandExecutorTxRequired = commandExecutorTxRequired;
return this;
}
public List<CommandInterceptor> getCustomPreCommandInterceptorsTxRequiresNew() {
return customPreCommandInterceptorsTxRequiresNew;
}
public ProcessEngineConfigurationImpl setCustomPreCommandInterceptorsTxRequiresNew(List<CommandInterceptor> customPreCommandInterceptorsTxRequiresNew) {
this.customPreCommandInterceptorsTxRequiresNew = customPreCommandInterceptorsTxRequiresNew;
return this;
}
public List<CommandInterceptor> getCustomPostCommandInterceptorsTxRequiresNew() {
return customPostCommandInterceptorsTxRequiresNew;
}
public ProcessEngineConfigurationImpl setCustomPostCommandInterceptorsTxRequiresNew(List<CommandInterceptor> customPostCommandInterceptorsTxRequiresNew) {
this.customPostCommandInterceptorsTxRequiresNew = customPostCommandInterceptorsTxRequiresNew;
return this;
}
public List<CommandInterceptor> getCommandInterceptorsTxRequiresNew() {
return commandInterceptorsTxRequiresNew;
}
public ProcessEngineConfigurationImpl setCommandInterceptorsTxRequiresNew(List<CommandInterceptor> commandInterceptorsTxRequiresNew) {
this.commandInterceptorsTxRequiresNew = commandInterceptorsTxRequiresNew;
return this;
}
public CommandExecutor getCommandExecutorTxRequiresNew() {
return commandExecutorTxRequiresNew;
}
public ProcessEngineConfigurationImpl setCommandExecutorTxRequiresNew(CommandExecutor commandExecutorTxRequiresNew) {
this.commandExecutorTxRequiresNew = commandExecutorTxRequiresNew;
return this;
}
public RepositoryService getRepositoryService() {
return repositoryService;
}
public ProcessEngineConfigurationImpl setRepositoryService(RepositoryService repositoryService) {
this.repositoryService = repositoryService;
return this;
}
public RuntimeService getRuntimeService() {
return runtimeService;
}
public ProcessEngineConfigurationImpl setRuntimeService(RuntimeService runtimeService) {
this.runtimeService = runtimeService;
return this;
}
public HistoryService getHistoryService() {
return historyService;
}
public ProcessEngineConfigurationImpl setHistoryService(HistoryService historyService) {
this.historyService = historyService;
return this;
}
public IdentityService getIdentityService() {
return identityService;
}
public ProcessEngineConfigurationImpl setIdentityService(IdentityService identityService) {
this.identityService = identityService;
return this;
}
public TaskService getTaskService() {
return taskService;
}
public ProcessEngineConfigurationImpl setTaskService(TaskService taskService) {
this.taskService = taskService;
return this;
}
public FormService getFormService() {
return formService;
}
public ProcessEngineConfigurationImpl setFormService(FormService formService) {
this.formService = formService;
return this;
}
public ManagementService getManagementService() {
return managementService;
}
public AuthorizationService getAuthorizationService() {
return authorizationService;
}
public ProcessEngineConfigurationImpl setManagementService(ManagementService managementService) {
this.managementService = managementService;
return this;
}
public CaseService getCaseService() {
return caseService;
}
public void setCaseService(CaseService caseService) {
this.caseService = caseService;
}
public FilterService getFilterService() {
return filterService;
}
public void setFilterService(FilterService filterService) {
this.filterService = filterService;
}
public ExternalTaskService getExternalTaskService() {
return externalTaskService;
}
public void setExternalTaskService(ExternalTaskService externalTaskService) {
this.externalTaskService = externalTaskService;
}
public DecisionService getDecisionService() {
return decisionService;
}
public OptimizeService getOptimizeService() {
return optimizeService;
}
public void setDecisionService(DecisionService decisionService) {
this.decisionService = decisionService;
}
public Map<Class<?>, SessionFactory> getSessionFactories() {
return sessionFactories;
}
public ProcessEngineConfigurationImpl setSessionFactories(Map<Class<?>, SessionFactory> sessionFactories) {
this.sessionFactories = sessionFactories;
return this;
}
public List<Deployer> getDeployers() {
return deployers;
}
public ProcessEngineConfigurationImpl setDeployers(List<Deployer> deployers) {
this.deployers = deployers;
return this;
}
public JobExecutor getJobExecutor() {
return jobExecutor;
}
public ProcessEngineConfigurationImpl setJobExecutor(JobExecutor jobExecutor) {
this.jobExecutor = jobExecutor;
return this;
}
public PriorityProvider<JobDeclaration<?, ?>> getJobPriorityProvider() {
return jobPriorityProvider;
}
public void setJobPriorityProvider(PriorityProvider<JobDeclaration<?, ?>> jobPriorityProvider) {
this.jobPriorityProvider = jobPriorityProvider;
}
public PriorityProvider<ExternalTaskActivityBehavior> getExternalTaskPriorityProvider() {
return externalTaskPriorityProvider;
}
public void setExternalTaskPriorityProvider(PriorityProvider<ExternalTaskActivityBehavior> externalTaskPriorityProvider) {
this.externalTaskPriorityProvider = externalTaskPriorityProvider;
}
public IdGenerator getIdGenerator() {
return idGenerator;
}
public ProcessEngineConfigurationImpl setIdGenerator(IdGenerator idGenerator) {
this.idGenerator = idGenerator;
return this;
}
public String getWsSyncFactoryClassName() {
return wsSyncFactoryClassName;
}
public ProcessEngineConfigurationImpl setWsSyncFactoryClassName(String wsSyncFactoryClassName) {
this.wsSyncFactoryClassName = wsSyncFactoryClassName;
return this;
}
public Map<String, FormEngine> getFormEngines() {
return formEngines;
}
public ProcessEngineConfigurationImpl setFormEngines(Map<String, FormEngine> formEngines) {
this.formEngines = formEngines;
return this;
}
public FormTypes getFormTypes() {
return formTypes;
}
public ProcessEngineConfigurationImpl setFormTypes(FormTypes formTypes) {
this.formTypes = formTypes;
return this;
}
public ScriptingEngines getScriptingEngines() {
return scriptingEngines;
}
public ProcessEngineConfigurationImpl setScriptingEngines(ScriptingEngines scriptingEngines) {
this.scriptingEngines = scriptingEngines;
return this;
}
public VariableSerializers getVariableSerializers() {
return variableSerializers;
}
public VariableSerializerFactory getFallbackSerializerFactory() {
return fallbackSerializerFactory;
}
public void setFallbackSerializerFactory(VariableSerializerFactory fallbackSerializerFactory) {
this.fallbackSerializerFactory = fallbackSerializerFactory;
}
public ProcessEngineConfigurationImpl setVariableTypes(VariableSerializers variableSerializers) {
this.variableSerializers = variableSerializers;
return this;
}
public ExpressionManager getExpressionManager() {
return expressionManager;
}
public ProcessEngineConfigurationImpl setExpressionManager(ExpressionManager expressionManager) {
this.expressionManager = expressionManager;
return this;
}
public BusinessCalendarManager getBusinessCalendarManager() {
return businessCalendarManager;
}
public ProcessEngineConfigurationImpl setBusinessCalendarManager(BusinessCalendarManager businessCalendarManager) {
this.businessCalendarManager = businessCalendarManager;
return this;
}
public CommandContextFactory getCommandContextFactory() {
return commandContextFactory;
}
public ProcessEngineConfigurationImpl setCommandContextFactory(CommandContextFactory commandContextFactory) {
this.commandContextFactory = commandContextFactory;
return this;
}
public TransactionContextFactory getTransactionContextFactory() {
return transactionContextFactory;
}
public ProcessEngineConfigurationImpl setTransactionContextFactory(TransactionContextFactory transactionContextFactory) {
this.transactionContextFactory = transactionContextFactory;
return this;
}
public List<Deployer> getCustomPreDeployers() {
return customPreDeployers;
}
public ProcessEngineConfigurationImpl setCustomPreDeployers(List<Deployer> customPreDeployers) {
this.customPreDeployers = customPreDeployers;
return this;
}
public List<Deployer> getCustomPostDeployers() {
return customPostDeployers;
}
public ProcessEngineConfigurationImpl setCustomPostDeployers(List<Deployer> customPostDeployers) {
this.customPostDeployers = customPostDeployers;
return this;
}
public void setCacheFactory(CacheFactory cacheFactory) {
this.cacheFactory = cacheFactory;
}
public void setCacheCapacity(int cacheCapacity) {
this.cacheCapacity = cacheCapacity;
}
public void setEnableFetchProcessDefinitionDescription(boolean enableFetchProcessDefinitionDescription){
this.enableFetchProcessDefinitionDescription = enableFetchProcessDefinitionDescription;
}
public boolean getEnableFetchProcessDefinitionDescription() {
return this.enableFetchProcessDefinitionDescription;
}
public Permission getDefaultUserPermissionForTask() {
return defaultUserPermissionForTask;
}
public ProcessEngineConfigurationImpl setDefaultUserPermissionForTask(Permission defaultUserPermissionForTask) {
this.defaultUserPermissionForTask = defaultUserPermissionForTask;
return this;
}
public Map<String, JobHandler> getJobHandlers() {
return jobHandlers;
}
public ProcessEngineConfigurationImpl setJobHandlers(Map<String, JobHandler> jobHandlers) {
this.jobHandlers = jobHandlers;
return this;
}
public SqlSessionFactory getSqlSessionFactory() {
return sqlSessionFactory;
}
public ProcessEngineConfigurationImpl setSqlSessionFactory(SqlSessionFactory sqlSessionFactory) {
this.sqlSessionFactory = sqlSessionFactory;
return this;
}
public DbSqlSessionFactory getDbSqlSessionFactory() {
return dbSqlSessionFactory;
}
public ProcessEngineConfigurationImpl setDbSqlSessionFactory(DbSqlSessionFactory dbSqlSessionFactory) {
this.dbSqlSessionFactory = dbSqlSessionFactory;
return this;
}
public TransactionFactory getTransactionFactory() {
return transactionFactory;
}
public ProcessEngineConfigurationImpl setTransactionFactory(TransactionFactory transactionFactory) {
this.transactionFactory = transactionFactory;
return this;
}
public List<SessionFactory> getCustomSessionFactories() {
return customSessionFactories;
}
public ProcessEngineConfigurationImpl setCustomSessionFactories(List<SessionFactory> customSessionFactories) {
this.customSessionFactories = customSessionFactories;
return this;
}
public List<JobHandler> getCustomJobHandlers() {
return customJobHandlers;
}
public ProcessEngineConfigurationImpl setCustomJobHandlers(List<JobHandler> customJobHandlers) {
this.customJobHandlers = customJobHandlers;
return this;
}
public List<FormEngine> getCustomFormEngines() {
return customFormEngines;
}
public ProcessEngineConfigurationImpl setCustomFormEngines(List<FormEngine> customFormEngines) {
this.customFormEngines = customFormEngines;
return this;
}
public List<AbstractFormFieldType> getCustomFormTypes() {
return customFormTypes;
}
public ProcessEngineConfigurationImpl setCustomFormTypes(List<AbstractFormFieldType> customFormTypes) {
this.customFormTypes = customFormTypes;
return this;
}
public List<TypedValueSerializer> getCustomPreVariableSerializers() {
return customPreVariableSerializers;
}
public ProcessEngineConfigurationImpl setCustomPreVariableSerializers(List<TypedValueSerializer> customPreVariableTypes) {
this.customPreVariableSerializers = customPreVariableTypes;
return this;
}
public List<TypedValueSerializer> getCustomPostVariableSerializers() {
return customPostVariableSerializers;
}
public ProcessEngineConfigurationImpl setCustomPostVariableSerializers(List<TypedValueSerializer> customPostVariableTypes) {
this.customPostVariableSerializers = customPostVariableTypes;
return this;
}
public List<BpmnParseListener> getCustomPreBPMNParseListeners() {
return preParseListeners;
}
public void setCustomPreBPMNParseListeners(List<BpmnParseListener> preParseListeners) {
this.preParseListeners = preParseListeners;
}
public List<BpmnParseListener> getCustomPostBPMNParseListeners() {
return postParseListeners;
}
public void setCustomPostBPMNParseListeners(List<BpmnParseListener> postParseListeners) {
this.postParseListeners = postParseListeners;
}
/**
* @deprecated use {@link #getCustomPreBPMNParseListeners} instead.
*/
@Deprecated
public List<BpmnParseListener> getPreParseListeners() {
return preParseListeners;
}
/**
* @deprecated use {@link #setCustomPreBPMNParseListeners} instead.
*/
@Deprecated
public void setPreParseListeners(List<BpmnParseListener> preParseListeners) {
this.preParseListeners = preParseListeners;
}
/**
* @deprecated use {@link #getCustomPostBPMNParseListeners} instead.
*/
@Deprecated
public List<BpmnParseListener> getPostParseListeners() {
return postParseListeners;
}
/**
* @deprecated use {@link #setCustomPostBPMNParseListeners} instead.
*/
@Deprecated
public void setPostParseListeners(List<BpmnParseListener> postParseListeners) {
this.postParseListeners = postParseListeners;
}
public List<CmmnTransformListener> getCustomPreCmmnTransformListeners() {
return customPreCmmnTransformListeners;
}
public void setCustomPreCmmnTransformListeners(List<CmmnTransformListener> customPreCmmnTransformListeners) {
this.customPreCmmnTransformListeners = customPreCmmnTransformListeners;
}
public List<CmmnTransformListener> getCustomPostCmmnTransformListeners() {
return customPostCmmnTransformListeners;
}
public void setCustomPostCmmnTransformListeners(List<CmmnTransformListener> customPostCmmnTransformListeners) {
this.customPostCmmnTransformListeners = customPostCmmnTransformListeners;
}
public Map<Object, Object> getBeans() {
return beans;
}
public void setBeans(Map<Object, Object> beans) {
this.beans = beans;
}
@Override
public ProcessEngineConfigurationImpl setClassLoader(ClassLoader classLoader) {
super.setClassLoader(classLoader);
return this;
}
@Override
public ProcessEngineConfigurationImpl setDatabaseType(String databaseType) {
super.setDatabaseType(databaseType);
return this;
}
@Override
public ProcessEngineConfigurationImpl setDataSource(DataSource dataSource) {
super.setDataSource(dataSource);
return this;
}
@Override
public ProcessEngineConfigurationImpl setDatabaseSchemaUpdate(String databaseSchemaUpdate) {
super.setDatabaseSchemaUpdate(databaseSchemaUpdate);
return this;
}
@Override
public ProcessEngineConfigurationImpl setHistory(String history) {
super.setHistory(history);
return this;
}
@Override
public ProcessEngineConfigurationImpl setIdBlockSize(int idBlockSize) {
super.setIdBlockSize(idBlockSize);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcDriver(String jdbcDriver) {
super.setJdbcDriver(jdbcDriver);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcPassword(String jdbcPassword) {
super.setJdbcPassword(jdbcPassword);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcUrl(String jdbcUrl) {
super.setJdbcUrl(jdbcUrl);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcUsername(String jdbcUsername) {
super.setJdbcUsername(jdbcUsername);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJobExecutorActivate(boolean jobExecutorActivate) {
super.setJobExecutorActivate(jobExecutorActivate);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerDefaultFrom(String mailServerDefaultFrom) {
super.setMailServerDefaultFrom(mailServerDefaultFrom);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerHost(String mailServerHost) {
super.setMailServerHost(mailServerHost);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerPassword(String mailServerPassword) {
super.setMailServerPassword(mailServerPassword);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerPort(int mailServerPort) {
super.setMailServerPort(mailServerPort);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerUseTLS(boolean useTLS) {
super.setMailServerUseTLS(useTLS);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerUsername(String mailServerUsername) {
super.setMailServerUsername(mailServerUsername);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcMaxActiveConnections(int jdbcMaxActiveConnections) {
super.setJdbcMaxActiveConnections(jdbcMaxActiveConnections);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcMaxCheckoutTime(int jdbcMaxCheckoutTime) {
super.setJdbcMaxCheckoutTime(jdbcMaxCheckoutTime);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcMaxIdleConnections(int jdbcMaxIdleConnections) {
super.setJdbcMaxIdleConnections(jdbcMaxIdleConnections);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcMaxWaitTime(int jdbcMaxWaitTime) {
super.setJdbcMaxWaitTime(jdbcMaxWaitTime);
return this;
}
@Override
public ProcessEngineConfigurationImpl setTransactionsExternallyManaged(boolean transactionsExternallyManaged) {
super.setTransactionsExternallyManaged(transactionsExternallyManaged);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJpaEntityManagerFactory(Object jpaEntityManagerFactory) {
this.jpaEntityManagerFactory = jpaEntityManagerFactory;
return this;
}
@Override
public ProcessEngineConfigurationImpl setJpaHandleTransaction(boolean jpaHandleTransaction) {
this.jpaHandleTransaction = jpaHandleTransaction;
return this;
}
@Override
public ProcessEngineConfigurationImpl setJpaCloseEntityManager(boolean jpaCloseEntityManager) {
this.jpaCloseEntityManager = jpaCloseEntityManager;
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcPingEnabled(boolean jdbcPingEnabled) {
this.jdbcPingEnabled = jdbcPingEnabled;
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcPingQuery(String jdbcPingQuery) {
this.jdbcPingQuery = jdbcPingQuery;
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcPingConnectionNotUsedFor(int jdbcPingNotUsedFor) {
this.jdbcPingConnectionNotUsedFor = jdbcPingNotUsedFor;
return this;
}
public boolean isDbIdentityUsed() {
return isDbIdentityUsed;
}
public void setDbIdentityUsed(boolean isDbIdentityUsed) {
this.isDbIdentityUsed = isDbIdentityUsed;
}
public boolean isDbHistoryUsed() {
return isDbHistoryUsed;
}
public void setDbHistoryUsed(boolean isDbHistoryUsed) {
this.isDbHistoryUsed = isDbHistoryUsed;
}
public List<ResolverFactory> getResolverFactories() {
return resolverFactories;
}
public void setResolverFactories(List<ResolverFactory> resolverFactories) {
this.resolverFactories = resolverFactories;
}
public DeploymentCache getDeploymentCache() {
return deploymentCache;
}
public void setDeploymentCache(DeploymentCache deploymentCache) {
this.deploymentCache = deploymentCache;
}
public DeploymentHandlerFactory getDeploymentHandlerFactory() {
return deploymentHandlerFactory;
}
public ProcessEngineConfigurationImpl setDeploymentHandlerFactory(DeploymentHandlerFactory deploymentHandlerFactory) {
this.deploymentHandlerFactory = deploymentHandlerFactory;
return this;
}
public ProcessEngineConfigurationImpl setDelegateInterceptor(DelegateInterceptor delegateInterceptor) {
this.delegateInterceptor = delegateInterceptor;
return this;
}
public DelegateInterceptor getDelegateInterceptor() {
return delegateInterceptor;
}
public RejectedJobsHandler getCustomRejectedJobsHandler() {
return customRejectedJobsHandler;
}
public ProcessEngineConfigurationImpl setCustomRejectedJobsHandler(RejectedJobsHandler customRejectedJobsHandler) {
this.customRejectedJobsHandler = customRejectedJobsHandler;
return this;
}
public EventHandler getEventHandler(String eventType) {
return eventHandlers.get(eventType);
}
public void setEventHandlers(Map<String, EventHandler> eventHandlers) {
this.eventHandlers = eventHandlers;
}
public Map<String, EventHandler> getEventHandlers() {
return eventHandlers;
}
public List<EventHandler> getCustomEventHandlers() {
return customEventHandlers;
}
public void setCustomEventHandlers(List<EventHandler> customEventHandlers) {
this.customEventHandlers = customEventHandlers;
}
public FailedJobCommandFactory getFailedJobCommandFactory() {
return failedJobCommandFactory;
}
public ProcessEngineConfigurationImpl setFailedJobCommandFactory(FailedJobCommandFactory failedJobCommandFactory) {
this.failedJobCommandFactory = failedJobCommandFactory;
return this;
}
/**
* Allows configuring a database table prefix which is used for all runtime operations of the process engine.
* For example, if you specify a prefix named 'PRE1.', activiti will query for executions in a table named
 * 'PRE1.ACT_RU_EXECUTION'.
 * <p>
* <strong>NOTE: the prefix is not respected by automatic database schema management. If you use
* {@link ProcessEngineConfiguration#DB_SCHEMA_UPDATE_CREATE_DROP}
* or {@link ProcessEngineConfiguration#DB_SCHEMA_UPDATE_TRUE}, activiti will create the database tables
* using the default names, regardless of the prefix configured here.</strong>
*
* @since 5.9
*/
public ProcessEngineConfiguration setDatabaseTablePrefix(String databaseTablePrefix) {
this.databaseTablePrefix = databaseTablePrefix;
return this;
}
public String getDatabaseTablePrefix() {
return databaseTablePrefix;
}
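  /*
   * Illustrative usage sketch (not part of the original class): shows how the table prefix
   * documented above is typically applied. The bootstrap of the configuration instance is
   * assumed, not taken from this file.
   *
   *   ProcessEngineConfigurationImpl config = ...;
   *   config.setDatabaseTablePrefix("PRE1.");
   *   // Runtime queries now target tables such as PRE1.ACT_RU_EXECUTION, while automatic
   *   // schema creation (DB_SCHEMA_UPDATE_TRUE / DB_SCHEMA_UPDATE_CREATE_DROP) still uses
   *   // the default, unprefixed table names.
   */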
public boolean isCreateDiagramOnDeploy() {
return isCreateDiagramOnDeploy;
}
public ProcessEngineConfiguration setCreateDiagramOnDeploy(boolean createDiagramOnDeploy) {
this.isCreateDiagramOnDeploy = createDiagramOnDeploy;
return this;
}
public String getDatabaseSchema() {
return databaseSchema;
}
public void setDatabaseSchema(String databaseSchema) {
this.databaseSchema = databaseSchema;
}
public DataSource getIdGeneratorDataSource() {
return idGeneratorDataSource;
}
public void setIdGeneratorDataSource(DataSource idGeneratorDataSource) {
this.idGeneratorDataSource = idGeneratorDataSource;
}
public String getIdGeneratorDataSourceJndiName() {
return idGeneratorDataSourceJndiName;
}
public void setIdGeneratorDataSourceJndiName(String idGeneratorDataSourceJndiName) {
this.idGeneratorDataSourceJndiName = idGeneratorDataSourceJndiName;
}
public ProcessApplicationManager getProcessApplicationManager() {
return processApplicationManager;
}
public void setProcessApplicationManager(ProcessApplicationManager processApplicationManager) {
this.processApplicationManager = processApplicationManager;
}
public CommandExecutor getCommandExecutorSchemaOperations() {
return commandExecutorSchemaOperations;
}
public void setCommandExecutorSchemaOperations(CommandExecutor commandExecutorSchemaOperations) {
this.commandExecutorSchemaOperations = commandExecutorSchemaOperations;
}
public CorrelationHandler getCorrelationHandler() {
return correlationHandler;
}
public void setCorrelationHandler(CorrelationHandler correlationHandler) {
this.correlationHandler = correlationHandler;
}
public ConditionHandler getConditionHandler() {
return conditionHandler;
}
public void setConditionHandler(ConditionHandler conditionHandler) {
this.conditionHandler = conditionHandler;
}
public ProcessEngineConfigurationImpl setHistoryEventHandler(HistoryEventHandler historyEventHandler) {
this.historyEventHandler = historyEventHandler;
return this;
}
public HistoryEventHandler getHistoryEventHandler() {
return historyEventHandler;
}
public IncidentHandler getIncidentHandler(String incidentType) {
return incidentHandlers.get(incidentType);
}
public Map<String, IncidentHandler> getIncidentHandlers() {
return incidentHandlers;
}
public void setIncidentHandlers(Map<String, IncidentHandler> incidentHandlers) {
this.incidentHandlers = incidentHandlers;
}
public List<IncidentHandler> getCustomIncidentHandlers() {
return customIncidentHandlers;
}
public void setCustomIncidentHandlers(List<IncidentHandler> customIncidentHandlers) {
this.customIncidentHandlers = customIncidentHandlers;
}
public Map<String, BatchJobHandler<?>> getBatchHandlers() {
return batchHandlers;
}
public void setBatchHandlers(Map<String, BatchJobHandler<?>> batchHandlers) {
this.batchHandlers = batchHandlers;
}
public List<BatchJobHandler<?>> getCustomBatchJobHandlers() {
return customBatchJobHandlers;
}
public void setCustomBatchJobHandlers(List<BatchJobHandler<?>> customBatchJobHandlers) {
this.customBatchJobHandlers = customBatchJobHandlers;
}
public int getBatchJobsPerSeed() {
return batchJobsPerSeed;
}
public void setBatchJobsPerSeed(int batchJobsPerSeed) {
this.batchJobsPerSeed = batchJobsPerSeed;
}
public int getInvocationsPerBatchJob() {
return invocationsPerBatchJob;
}
public void setInvocationsPerBatchJob(int invocationsPerBatchJob) {
this.invocationsPerBatchJob = invocationsPerBatchJob;
}
public int getBatchPollTime() {
return batchPollTime;
}
public void setBatchPollTime(int batchPollTime) {
this.batchPollTime = batchPollTime;
}
public long getBatchJobPriority() {
return batchJobPriority;
}
public void setBatchJobPriority(long batchJobPriority) {
this.batchJobPriority = batchJobPriority;
}
public SessionFactory getIdentityProviderSessionFactory() {
return identityProviderSessionFactory;
}
public void setIdentityProviderSessionFactory(SessionFactory identityProviderSessionFactory) {
this.identityProviderSessionFactory = identityProviderSessionFactory;
}
public SaltGenerator getSaltGenerator() {
return saltGenerator;
}
public void setSaltGenerator(SaltGenerator saltGenerator) {
this.saltGenerator = saltGenerator;
}
public void setPasswordEncryptor(PasswordEncryptor passwordEncryptor) {
this.passwordEncryptor = passwordEncryptor;
}
public PasswordEncryptor getPasswordEncryptor() {
return passwordEncryptor;
}
public List<PasswordEncryptor> getCustomPasswordChecker() {
return customPasswordChecker;
}
public void setCustomPasswordChecker(List<PasswordEncryptor> customPasswordChecker) {
this.customPasswordChecker = customPasswordChecker;
}
public PasswordManager getPasswordManager() {
return passwordManager;
}
public void setPasswordManager(PasswordManager passwordManager) {
this.passwordManager = passwordManager;
}
public Set<String> getRegisteredDeployments() {
return registeredDeployments;
}
public void setRegisteredDeployments(Set<String> registeredDeployments) {
this.registeredDeployments = registeredDeployments;
}
public ResourceAuthorizationProvider getResourceAuthorizationProvider() {
return resourceAuthorizationProvider;
}
public void setResourceAuthorizationProvider(ResourceAuthorizationProvider resourceAuthorizationProvider) {
this.resourceAuthorizationProvider = resourceAuthorizationProvider;
}
public PermissionProvider getPermissionProvider() {
return permissionProvider;
}
public void setPermissionProvider(PermissionProvider permissionProvider) {
this.permissionProvider = permissionProvider;
}
public List<ProcessEnginePlugin> getProcessEnginePlugins() {
return processEnginePlugins;
}
public void setProcessEnginePlugins(List<ProcessEnginePlugin> processEnginePlugins) {
this.processEnginePlugins = processEnginePlugins;
}
public ProcessEngineConfigurationImpl setHistoryEventProducer(HistoryEventProducer historyEventProducer) {
this.historyEventProducer = historyEventProducer;
return this;
}
public HistoryEventProducer getHistoryEventProducer() {
return historyEventProducer;
}
public ProcessEngineConfigurationImpl setCmmnHistoryEventProducer(CmmnHistoryEventProducer cmmnHistoryEventProducer) {
this.cmmnHistoryEventProducer = cmmnHistoryEventProducer;
return this;
}
public CmmnHistoryEventProducer getCmmnHistoryEventProducer() {
return cmmnHistoryEventProducer;
}
public ProcessEngineConfigurationImpl setDmnHistoryEventProducer(DmnHistoryEventProducer dmnHistoryEventProducer) {
this.dmnHistoryEventProducer = dmnHistoryEventProducer;
return this;
}
public DmnHistoryEventProducer getDmnHistoryEventProducer() {
return dmnHistoryEventProducer;
}
public Map<String, Class<? extends FormFieldValidator>> getCustomFormFieldValidators() {
return customFormFieldValidators;
}
public void setCustomFormFieldValidators(Map<String, Class<? extends FormFieldValidator>> customFormFieldValidators) {
this.customFormFieldValidators = customFormFieldValidators;
}
public void setFormValidators(FormValidators formValidators) {
this.formValidators = formValidators;
}
public FormValidators getFormValidators() {
return formValidators;
}
public boolean isExecutionTreePrefetchEnabled() {
return isExecutionTreePrefetchEnabled;
}
public void setExecutionTreePrefetchEnabled(boolean isExecutionTreePrefetchingEnabled) {
this.isExecutionTreePrefetchEnabled = isExecutionTreePrefetchingEnabled;
}
public ProcessEngineImpl getProcessEngine() {
return processEngine;
}
/**
 * If set to true, the process engine will save all script variables (created from JavaScript, Groovy, ...)
* as process variables.
*/
public void setAutoStoreScriptVariables(boolean autoStoreScriptVariables) {
this.autoStoreScriptVariables = autoStoreScriptVariables;
}
/**
 * @return true if the process engine should save all script variables (created from JavaScript, Groovy, ...)
* as process variables.
*/
public boolean isAutoStoreScriptVariables() {
return autoStoreScriptVariables;
}
/**
* If set to true, the process engine will attempt to pre-compile script sources at runtime
* to optimize script task execution performance.
*/
public void setEnableScriptCompilation(boolean enableScriptCompilation) {
this.enableScriptCompilation = enableScriptCompilation;
}
/**
 * @return true if compilation of script sources is enabled, false otherwise.
*/
public boolean isEnableScriptCompilation() {
return enableScriptCompilation;
}
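  /*
   * Illustrative sketch (not part of the original class): how the two scripting flags above are
   * commonly combined on a configuration instance; the variable name is an assumption.
   *
   *   ProcessEngineConfigurationImpl config = ...;
   *   config.setAutoStoreScriptVariables(true); // script-created variables become process variables
   *   config.setEnableScriptCompilation(true);  // pre-compile script sources for faster script tasks
   */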
public boolean isEnableGracefulDegradationOnContextSwitchFailure() {
return enableGracefulDegradationOnContextSwitchFailure;
}
/**
* <p>If set to true, the process engine will tolerate certain exceptions that may result
* from the fact that it cannot switch to the context of a process application that has made
* a deployment.</p>
 * <p>Affects the following scenarios:</p>
* <ul>
* <li><b>Determining job priorities</b>: uses a default priority in case an expression fails to evaluate</li>
* </ul>
*/
public void setEnableGracefulDegradationOnContextSwitchFailure(boolean enableGracefulDegradationOnContextSwitchFailure) {
this.enableGracefulDegradationOnContextSwitchFailure = enableGracefulDegradationOnContextSwitchFailure;
}
/**
* @return true if the process engine acquires an exclusive lock when creating a deployment.
*/
public boolean isDeploymentLockUsed() {
return isDeploymentLockUsed;
}
/**
* If set to true, the process engine will acquire an exclusive lock when creating a deployment.
* This ensures that {@link DeploymentBuilder#enableDuplicateFiltering()} works correctly in a clustered environment.
*/
public void setDeploymentLockUsed(boolean isDeploymentLockUsed) {
this.isDeploymentLockUsed = isDeploymentLockUsed;
}
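  /*
   * Illustrative sketch (not part of the original class): with the deployment lock enabled,
   * duplicate filtering stays reliable when several cluster nodes deploy at once. The
   * RepositoryService call chain below is a typical pattern and the resource name is made up.
   *
   *   config.setDeploymentLockUsed(true);
   *   repositoryService.createDeployment()
   *       .enableDuplicateFiltering()
   *       .addClasspathResource("invoice.bpmn")
   *       .deploy();
   */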
/**
* @return true if deployment processing must be synchronized
*/
public boolean isDeploymentSynchronized() {
return isDeploymentSynchronized;
}
/**
* Sets if deployment processing must be synchronized.
* @param deploymentSynchronized {@code true} when deployment must be synchronized,
 * {@code false} when several deployments may be processed in parallel
*/
public void setDeploymentSynchronized(boolean deploymentSynchronized) {
isDeploymentSynchronized = deploymentSynchronized;
}
public boolean isCmmnEnabled() {
return cmmnEnabled;
}
public void setCmmnEnabled(boolean cmmnEnabled) {
this.cmmnEnabled = cmmnEnabled;
}
public boolean isDmnEnabled() {
return dmnEnabled;
}
public void setDmnEnabled(boolean dmnEnabled) {
this.dmnEnabled = dmnEnabled;
}
public ScriptFactory getScriptFactory() {
return scriptFactory;
}
public ScriptingEnvironment getScriptingEnvironment() {
return scriptingEnvironment;
}
public void setScriptFactory(ScriptFactory scriptFactory) {
this.scriptFactory = scriptFactory;
}
public void setScriptingEnvironment(ScriptingEnvironment scriptingEnvironment) {
this.scriptingEnvironment = scriptingEnvironment;
}
public List<ScriptEnvResolver> getEnvScriptResolvers() {
return scriptEnvResolvers;
}
public void setEnvScriptResolvers(List<ScriptEnvResolver> scriptEnvResolvers) {
this.scriptEnvResolvers = scriptEnvResolvers;
}
public ProcessEngineConfiguration setArtifactFactory(ArtifactFactory artifactFactory) {
this.artifactFactory = artifactFactory;
return this;
}
public ArtifactFactory getArtifactFactory() {
return artifactFactory;
}
public String getDefaultSerializationFormat() {
return defaultSerializationFormat;
}
public ProcessEngineConfigurationImpl setDefaultSerializationFormat(String defaultSerializationFormat) {
this.defaultSerializationFormat = defaultSerializationFormat;
return this;
}
public boolean isJavaSerializationFormatEnabled() {
return javaSerializationFormatEnabled;
}
public void setJavaSerializationFormatEnabled(boolean javaSerializationFormatEnabled) {
this.javaSerializationFormatEnabled = javaSerializationFormatEnabled;
}
public ProcessEngineConfigurationImpl setDefaultCharsetName(String defaultCharsetName) {
this.defaultCharsetName = defaultCharsetName;
return this;
}
public ProcessEngineConfigurationImpl setDefaultCharset(Charset defaultCharset) {
  this.defaultCharset = defaultCharset;
return this;
}
public Charset getDefaultCharset() {
return defaultCharset;
}
public boolean isDbEntityCacheReuseEnabled() {
return isDbEntityCacheReuseEnabled;
}
public ProcessEngineConfigurationImpl setDbEntityCacheReuseEnabled(boolean isDbEntityCacheReuseEnabled) {
this.isDbEntityCacheReuseEnabled = isDbEntityCacheReuseEnabled;
return this;
}
public DbEntityCacheKeyMapping getDbEntityCacheKeyMapping() {
return dbEntityCacheKeyMapping;
}
public ProcessEngineConfigurationImpl setDbEntityCacheKeyMapping(DbEntityCacheKeyMapping dbEntityCacheKeyMapping) {
this.dbEntityCacheKeyMapping = dbEntityCacheKeyMapping;
return this;
}
public ProcessEngineConfigurationImpl setCustomHistoryLevels(List<HistoryLevel> customHistoryLevels) {
this.customHistoryLevels = customHistoryLevels;
return this;
}
public List<HistoryLevel> getHistoryLevels() {
return historyLevels;
}
public List<HistoryLevel> getCustomHistoryLevels() {
return customHistoryLevels;
}
public boolean isInvokeCustomVariableListeners() {
return isInvokeCustomVariableListeners;
}
public ProcessEngineConfigurationImpl setInvokeCustomVariableListeners(boolean isInvokeCustomVariableListeners) {
this.isInvokeCustomVariableListeners = isInvokeCustomVariableListeners;
return this;
}
public void close() {
if (forceCloseMybatisConnectionPool
&& dataSource instanceof PooledDataSource) {
      // ACT-233: connection pool of iBatis is not properly initialized if this is not called!
((PooledDataSource) dataSource).forceCloseAll();
}
}
public MetricsRegistry getMetricsRegistry() {
return metricsRegistry;
}
public ProcessEngineConfigurationImpl setMetricsRegistry(MetricsRegistry metricsRegistry) {
this.metricsRegistry = metricsRegistry;
return this;
}
public ProcessEngineConfigurationImpl setMetricsEnabled(boolean isMetricsEnabled) {
this.isMetricsEnabled = isMetricsEnabled;
return this;
}
public boolean isMetricsEnabled() {
return isMetricsEnabled;
}
public DbMetricsReporter getDbMetricsReporter() {
return dbMetricsReporter;
}
public ProcessEngineConfigurationImpl setDbMetricsReporter(DbMetricsReporter dbMetricsReporter) {
this.dbMetricsReporter = dbMetricsReporter;
return this;
}
public boolean isDbMetricsReporterActivate() {
return isDbMetricsReporterActivate;
}
public ProcessEngineConfigurationImpl setDbMetricsReporterActivate(boolean isDbMetricsReporterEnabled) {
this.isDbMetricsReporterActivate = isDbMetricsReporterEnabled;
return this;
}
public MetricsReporterIdProvider getMetricsReporterIdProvider() {
return metricsReporterIdProvider;
}
public void setMetricsReporterIdProvider(MetricsReporterIdProvider metricsReporterIdProvider) {
this.metricsReporterIdProvider = metricsReporterIdProvider;
}
public boolean isEnableScriptEngineCaching() {
return enableScriptEngineCaching;
}
public ProcessEngineConfigurationImpl setEnableScriptEngineCaching(boolean enableScriptEngineCaching) {
this.enableScriptEngineCaching = enableScriptEngineCaching;
return this;
}
public boolean isEnableFetchScriptEngineFromProcessApplication() {
return enableFetchScriptEngineFromProcessApplication;
}
public ProcessEngineConfigurationImpl setEnableFetchScriptEngineFromProcessApplication(boolean enable) {
this.enableFetchScriptEngineFromProcessApplication = enable;
return this;
}
public boolean isEnableExpressionsInAdhocQueries() {
return enableExpressionsInAdhocQueries;
}
public void setEnableExpressionsInAdhocQueries(boolean enableExpressionsInAdhocQueries) {
this.enableExpressionsInAdhocQueries = enableExpressionsInAdhocQueries;
}
public boolean isEnableExpressionsInStoredQueries() {
return enableExpressionsInStoredQueries;
}
public void setEnableExpressionsInStoredQueries(boolean enableExpressionsInStoredQueries) {
this.enableExpressionsInStoredQueries = enableExpressionsInStoredQueries;
}
public boolean isEnableXxeProcessing() {
return enableXxeProcessing;
}
public void setEnableXxeProcessing(boolean enableXxeProcessing) {
this.enableXxeProcessing = enableXxeProcessing;
}
public ProcessEngineConfigurationImpl setBpmnStacktraceVerbose(boolean isBpmnStacktraceVerbose) {
this.isBpmnStacktraceVerbose = isBpmnStacktraceVerbose;
return this;
}
public boolean isBpmnStacktraceVerbose() {
return this.isBpmnStacktraceVerbose;
}
public boolean isForceCloseMybatisConnectionPool() {
return forceCloseMybatisConnectionPool;
}
public ProcessEngineConfigurationImpl setForceCloseMybatisConnectionPool(boolean forceCloseMybatisConnectionPool) {
this.forceCloseMybatisConnectionPool = forceCloseMybatisConnectionPool;
return this;
}
public boolean isRestrictUserOperationLogToAuthenticatedUsers() {
return restrictUserOperationLogToAuthenticatedUsers;
}
public ProcessEngineConfigurationImpl setRestrictUserOperationLogToAuthenticatedUsers(boolean restrictUserOperationLogToAuthenticatedUsers) {
this.restrictUserOperationLogToAuthenticatedUsers = restrictUserOperationLogToAuthenticatedUsers;
return this;
}
public ProcessEngineConfigurationImpl setTenantIdProvider(TenantIdProvider tenantIdProvider) {
this.tenantIdProvider = tenantIdProvider;
return this;
}
public TenantIdProvider getTenantIdProvider() {
return this.tenantIdProvider;
}
public void setMigrationActivityMatcher(MigrationActivityMatcher migrationActivityMatcher) {
this.migrationActivityMatcher = migrationActivityMatcher;
}
public MigrationActivityMatcher getMigrationActivityMatcher() {
return migrationActivityMatcher;
}
public void setCustomPreMigrationActivityValidators(List<MigrationActivityValidator> customPreMigrationActivityValidators) {
this.customPreMigrationActivityValidators = customPreMigrationActivityValidators;
}
public List<MigrationActivityValidator> getCustomPreMigrationActivityValidators() {
return customPreMigrationActivityValidators;
}
public void setCustomPostMigrationActivityValidators(List<MigrationActivityValidator> customPostMigrationActivityValidators) {
this.customPostMigrationActivityValidators = customPostMigrationActivityValidators;
}
public List<MigrationActivityValidator> getCustomPostMigrationActivityValidators() {
return customPostMigrationActivityValidators;
}
public List<MigrationActivityValidator> getDefaultMigrationActivityValidators() {
List<MigrationActivityValidator> migrationActivityValidators = new ArrayList<>();
migrationActivityValidators.add(SupportedActivityValidator.INSTANCE);
migrationActivityValidators.add(SupportedPassiveEventTriggerActivityValidator.INSTANCE);
migrationActivityValidators.add(NoCompensationHandlerActivityValidator.INSTANCE);
return migrationActivityValidators;
}
public void setMigrationInstructionGenerator(MigrationInstructionGenerator migrationInstructionGenerator) {
this.migrationInstructionGenerator = migrationInstructionGenerator;
}
public MigrationInstructionGenerator getMigrationInstructionGenerator() {
return migrationInstructionGenerator;
}
public void setMigrationInstructionValidators(List<MigrationInstructionValidator> migrationInstructionValidators) {
this.migrationInstructionValidators = migrationInstructionValidators;
}
public List<MigrationInstructionValidator> getMigrationInstructionValidators() {
return migrationInstructionValidators;
}
public void setCustomPostMigrationInstructionValidators(List<MigrationInstructionValidator> customPostMigrationInstructionValidators) {
this.customPostMigrationInstructionValidators = customPostMigrationInstructionValidators;
}
public List<MigrationInstructionValidator> getCustomPostMigrationInstructionValidators() {
return customPostMigrationInstructionValidators;
}
public void setCustomPreMigrationInstructionValidators(List<MigrationInstructionValidator> customPreMigrationInstructionValidators) {
this.customPreMigrationInstructionValidators = customPreMigrationInstructionValidators;
}
public List<MigrationInstructionValidator> getCustomPreMigrationInstructionValidators() {
return customPreMigrationInstructionValidators;
}
public List<MigrationInstructionValidator> getDefaultMigrationInstructionValidators() {
List<MigrationInstructionValidator> migrationInstructionValidators = new ArrayList<>();
migrationInstructionValidators.add(new SameBehaviorInstructionValidator());
migrationInstructionValidators.add(new SameEventTypeValidator());
migrationInstructionValidators.add(new OnlyOnceMappedActivityInstructionValidator());
migrationInstructionValidators.add(new CannotAddMultiInstanceBodyValidator());
migrationInstructionValidators.add(new CannotAddMultiInstanceInnerActivityValidator());
migrationInstructionValidators.add(new CannotRemoveMultiInstanceInnerActivityValidator());
migrationInstructionValidators.add(new GatewayMappingValidator());
migrationInstructionValidators.add(new SameEventScopeInstructionValidator());
migrationInstructionValidators.add(new UpdateEventTriggersValidator());
migrationInstructionValidators.add(new AdditionalFlowScopeInstructionValidator());
migrationInstructionValidators.add(new ConditionalEventUpdateEventTriggerValidator());
return migrationInstructionValidators;
}
public void setMigratingActivityInstanceValidators(List<MigratingActivityInstanceValidator> migratingActivityInstanceValidators) {
this.migratingActivityInstanceValidators = migratingActivityInstanceValidators;
}
public List<MigratingActivityInstanceValidator> getMigratingActivityInstanceValidators() {
return migratingActivityInstanceValidators;
}
public void setCustomPostMigratingActivityInstanceValidators(List<MigratingActivityInstanceValidator> customPostMigratingActivityInstanceValidators) {
this.customPostMigratingActivityInstanceValidators = customPostMigratingActivityInstanceValidators;
}
public List<MigratingActivityInstanceValidator> getCustomPostMigratingActivityInstanceValidators() {
return customPostMigratingActivityInstanceValidators;
}
public void setCustomPreMigratingActivityInstanceValidators(List<MigratingActivityInstanceValidator> customPreMigratingActivityInstanceValidators) {
this.customPreMigratingActivityInstanceValidators = customPreMigratingActivityInstanceValidators;
}
public List<MigratingActivityInstanceValidator> getCustomPreMigratingActivityInstanceValidators() {
return customPreMigratingActivityInstanceValidators;
}
public List<MigratingTransitionInstanceValidator> getMigratingTransitionInstanceValidators() {
return migratingTransitionInstanceValidators;
}
public List<MigratingCompensationInstanceValidator> getMigratingCompensationInstanceValidators() {
return migratingCompensationInstanceValidators;
}
public List<MigratingActivityInstanceValidator> getDefaultMigratingActivityInstanceValidators() {
List<MigratingActivityInstanceValidator> migratingActivityInstanceValidators = new ArrayList<>();
migratingActivityInstanceValidators.add(new NoUnmappedLeafInstanceValidator());
migratingActivityInstanceValidators.add(new VariableConflictActivityInstanceValidator());
migratingActivityInstanceValidators.add(new SupportedActivityInstanceValidator());
return migratingActivityInstanceValidators;
}
public List<MigratingTransitionInstanceValidator> getDefaultMigratingTransitionInstanceValidators() {
List<MigratingTransitionInstanceValidator> migratingTransitionInstanceValidators = new ArrayList<>();
migratingTransitionInstanceValidators.add(new NoUnmappedLeafInstanceValidator());
migratingTransitionInstanceValidators.add(new AsyncAfterMigrationValidator());
migratingTransitionInstanceValidators.add(new AsyncProcessStartMigrationValidator());
migratingTransitionInstanceValidators.add(new AsyncMigrationValidator());
return migratingTransitionInstanceValidators;
}
public List<CommandChecker> getCommandCheckers() {
return commandCheckers;
}
public void setCommandCheckers(List<CommandChecker> commandCheckers) {
this.commandCheckers = commandCheckers;
}
public ProcessEngineConfigurationImpl setUseSharedSqlSessionFactory(boolean isUseSharedSqlSessionFactory) {
this.isUseSharedSqlSessionFactory = isUseSharedSqlSessionFactory;
return this;
}
public boolean isUseSharedSqlSessionFactory() {
return isUseSharedSqlSessionFactory;
}
public boolean getDisableStrictCallActivityValidation() {
return disableStrictCallActivityValidation;
}
public void setDisableStrictCallActivityValidation(boolean disableStrictCallActivityValidation) {
this.disableStrictCallActivityValidation = disableStrictCallActivityValidation;
}
public String getHistoryCleanupBatchWindowStartTime() {
return historyCleanupBatchWindowStartTime;
}
public void setHistoryCleanupBatchWindowStartTime(String historyCleanupBatchWindowStartTime) {
this.historyCleanupBatchWindowStartTime = historyCleanupBatchWindowStartTime;
}
public String getHistoryCleanupBatchWindowEndTime() {
return historyCleanupBatchWindowEndTime;
}
public void setHistoryCleanupBatchWindowEndTime(String historyCleanupBatchWindowEndTime) {
this.historyCleanupBatchWindowEndTime = historyCleanupBatchWindowEndTime;
}
public String getMondayHistoryCleanupBatchWindowStartTime() {
return mondayHistoryCleanupBatchWindowStartTime;
}
public void setMondayHistoryCleanupBatchWindowStartTime(String mondayHistoryCleanupBatchWindowStartTime) {
this.mondayHistoryCleanupBatchWindowStartTime = mondayHistoryCleanupBatchWindowStartTime;
}
public String getMondayHistoryCleanupBatchWindowEndTime() {
return mondayHistoryCleanupBatchWindowEndTime;
}
public void setMondayHistoryCleanupBatchWindowEndTime(String mondayHistoryCleanupBatchWindowEndTime) {
this.mondayHistoryCleanupBatchWindowEndTime = mondayHistoryCleanupBatchWindowEndTime;
}
public String getTuesdayHistoryCleanupBatchWindowStartTime() {
return tuesdayHistoryCleanupBatchWindowStartTime;
}
public void setTuesdayHistoryCleanupBatchWindowStartTime(String tuesdayHistoryCleanupBatchWindowStartTime) {
this.tuesdayHistoryCleanupBatchWindowStartTime = tuesdayHistoryCleanupBatchWindowStartTime;
}
public String getTuesdayHistoryCleanupBatchWindowEndTime() {
return tuesdayHistoryCleanupBatchWindowEndTime;
}
public void setTuesdayHistoryCleanupBatchWindowEndTime(String tuesdayHistoryCleanupBatchWindowEndTime) {
this.tuesdayHistoryCleanupBatchWindowEndTime = tuesdayHistoryCleanupBatchWindowEndTime;
}
public String getWednesdayHistoryCleanupBatchWindowStartTime() {
return wednesdayHistoryCleanupBatchWindowStartTime;
}
public void setWednesdayHistoryCleanupBatchWindowStartTime(String wednesdayHistoryCleanupBatchWindowStartTime) {
this.wednesdayHistoryCleanupBatchWindowStartTime = wednesdayHistoryCleanupBatchWindowStartTime;
}
public String getWednesdayHistoryCleanupBatchWindowEndTime() {
return wednesdayHistoryCleanupBatchWindowEndTime;
}
public void setWednesdayHistoryCleanupBatchWindowEndTime(String wednesdayHistoryCleanupBatchWindowEndTime) {
this.wednesdayHistoryCleanupBatchWindowEndTime = wednesdayHistoryCleanupBatchWindowEndTime;
}
public String getThursdayHistoryCleanupBatchWindowStartTime() {
return thursdayHistoryCleanupBatchWindowStartTime;
}
public void setThursdayHistoryCleanupBatchWindowStartTime(String thursdayHistoryCleanupBatchWindowStartTime) {
this.thursdayHistoryCleanupBatchWindowStartTime = thursdayHistoryCleanupBatchWindowStartTime;
}
public String getThursdayHistoryCleanupBatchWindowEndTime() {
return thursdayHistoryCleanupBatchWindowEndTime;
}
public void setThursdayHistoryCleanupBatchWindowEndTime(String thursdayHistoryCleanupBatchWindowEndTime) {
this.thursdayHistoryCleanupBatchWindowEndTime = thursdayHistoryCleanupBatchWindowEndTime;
}
public String getFridayHistoryCleanupBatchWindowStartTime() {
return fridayHistoryCleanupBatchWindowStartTime;
}
public void setFridayHistoryCleanupBatchWindowStartTime(String fridayHistoryCleanupBatchWindowStartTime) {
this.fridayHistoryCleanupBatchWindowStartTime = fridayHistoryCleanupBatchWindowStartTime;
}
public String getFridayHistoryCleanupBatchWindowEndTime() {
return fridayHistoryCleanupBatchWindowEndTime;
}
public void setFridayHistoryCleanupBatchWindowEndTime(String fridayHistoryCleanupBatchWindowEndTime) {
this.fridayHistoryCleanupBatchWindowEndTime = fridayHistoryCleanupBatchWindowEndTime;
}
public String getSaturdayHistoryCleanupBatchWindowStartTime() {
return saturdayHistoryCleanupBatchWindowStartTime;
}
public void setSaturdayHistoryCleanupBatchWindowStartTime(String saturdayHistoryCleanupBatchWindowStartTime) {
this.saturdayHistoryCleanupBatchWindowStartTime = saturdayHistoryCleanupBatchWindowStartTime;
}
public String getSaturdayHistoryCleanupBatchWindowEndTime() {
return saturdayHistoryCleanupBatchWindowEndTime;
}
public void setSaturdayHistoryCleanupBatchWindowEndTime(String saturdayHistoryCleanupBatchWindowEndTime) {
this.saturdayHistoryCleanupBatchWindowEndTime = saturdayHistoryCleanupBatchWindowEndTime;
}
public String getSundayHistoryCleanupBatchWindowStartTime() {
return sundayHistoryCleanupBatchWindowStartTime;
}
public void setSundayHistoryCleanupBatchWindowStartTime(String sundayHistoryCleanupBatchWindowStartTime) {
this.sundayHistoryCleanupBatchWindowStartTime = sundayHistoryCleanupBatchWindowStartTime;
}
public String getSundayHistoryCleanupBatchWindowEndTime() {
return sundayHistoryCleanupBatchWindowEndTime;
}
public void setSundayHistoryCleanupBatchWindowEndTime(String sundayHistoryCleanupBatchWindowEndTime) {
this.sundayHistoryCleanupBatchWindowEndTime = sundayHistoryCleanupBatchWindowEndTime;
}
public Date getHistoryCleanupBatchWindowStartTimeAsDate() {
return historyCleanupBatchWindowStartTimeAsDate;
}
public void setHistoryCleanupBatchWindowStartTimeAsDate(Date historyCleanupBatchWindowStartTimeAsDate) {
this.historyCleanupBatchWindowStartTimeAsDate = historyCleanupBatchWindowStartTimeAsDate;
}
public void setHistoryCleanupBatchWindowEndTimeAsDate(Date historyCleanupBatchWindowEndTimeAsDate) {
this.historyCleanupBatchWindowEndTimeAsDate = historyCleanupBatchWindowEndTimeAsDate;
}
public Date getHistoryCleanupBatchWindowEndTimeAsDate() {
return historyCleanupBatchWindowEndTimeAsDate;
}
public Map<Integer, BatchWindowConfiguration> getHistoryCleanupBatchWindows() {
return historyCleanupBatchWindows;
}
public void setHistoryCleanupBatchWindows(Map<Integer, BatchWindowConfiguration> historyCleanupBatchWindows) {
this.historyCleanupBatchWindows = historyCleanupBatchWindows;
}
public int getHistoryCleanupBatchSize() {
return historyCleanupBatchSize;
}
public void setHistoryCleanupBatchSize(int historyCleanupBatchSize) {
this.historyCleanupBatchSize = historyCleanupBatchSize;
}
public int getHistoryCleanupBatchThreshold() {
return historyCleanupBatchThreshold;
}
public void setHistoryCleanupBatchThreshold(int historyCleanupBatchThreshold) {
this.historyCleanupBatchThreshold = historyCleanupBatchThreshold;
}
public boolean isHistoryCleanupMetricsEnabled() {
return historyCleanupMetricsEnabled;
}
public void setHistoryCleanupMetricsEnabled(boolean historyCleanupMetricsEnabled) {
this.historyCleanupMetricsEnabled = historyCleanupMetricsEnabled;
}
public String getHistoryTimeToLive() {
return historyTimeToLive;
}
public void setHistoryTimeToLive(String historyTimeToLive) {
this.historyTimeToLive = historyTimeToLive;
}
public String getBatchOperationHistoryTimeToLive() {
return batchOperationHistoryTimeToLive;
}
public int getHistoryCleanupDegreeOfParallelism() {
return historyCleanupDegreeOfParallelism;
}
public void setHistoryCleanupDegreeOfParallelism(int historyCleanupDegreeOfParallelism) {
this.historyCleanupDegreeOfParallelism = historyCleanupDegreeOfParallelism;
}
public void setBatchOperationHistoryTimeToLive(String batchOperationHistoryTimeToLive) {
this.batchOperationHistoryTimeToLive = batchOperationHistoryTimeToLive;
}
public Map<String, String> getBatchOperationsForHistoryCleanup() {
return batchOperationsForHistoryCleanup;
}
public void setBatchOperationsForHistoryCleanup(Map<String, String> batchOperationsForHistoryCleanup) {
this.batchOperationsForHistoryCleanup = batchOperationsForHistoryCleanup;
}
public Map<String, Integer> getParsedBatchOperationsForHistoryCleanup() {
return parsedBatchOperationsForHistoryCleanup;
}
public void setParsedBatchOperationsForHistoryCleanup(Map<String, Integer> parsedBatchOperationsForHistoryCleanup) {
this.parsedBatchOperationsForHistoryCleanup = parsedBatchOperationsForHistoryCleanup;
}
public BatchWindowManager getBatchWindowManager() {
return batchWindowManager;
}
public void setBatchWindowManager(BatchWindowManager batchWindowManager) {
this.batchWindowManager = batchWindowManager;
}
public HistoryRemovalTimeProvider getHistoryRemovalTimeProvider() {
return historyRemovalTimeProvider;
}
public ProcessEngineConfigurationImpl setHistoryRemovalTimeProvider(HistoryRemovalTimeProvider removalTimeProvider) {
historyRemovalTimeProvider = removalTimeProvider;
return this;
}
public String getHistoryRemovalTimeStrategy() {
return historyRemovalTimeStrategy;
}
public ProcessEngineConfigurationImpl setHistoryRemovalTimeStrategy(String removalTimeStrategy) {
historyRemovalTimeStrategy = removalTimeStrategy;
return this;
}
public String getHistoryCleanupStrategy() {
return historyCleanupStrategy;
}
public ProcessEngineConfigurationImpl setHistoryCleanupStrategy(String historyCleanupStrategy) {
this.historyCleanupStrategy = historyCleanupStrategy;
return this;
}
public int getFailedJobListenerMaxRetries() {
return failedJobListenerMaxRetries;
}
public void setFailedJobListenerMaxRetries(int failedJobListenerMaxRetries) {
this.failedJobListenerMaxRetries = failedJobListenerMaxRetries;
}
public String getFailedJobRetryTimeCycle() {
return failedJobRetryTimeCycle;
}
public void setFailedJobRetryTimeCycle(String failedJobRetryTimeCycle) {
this.failedJobRetryTimeCycle = failedJobRetryTimeCycle;
}
public int getLoginMaxAttempts() {
return loginMaxAttempts;
}
public void setLoginMaxAttempts(int loginMaxAttempts) {
this.loginMaxAttempts = loginMaxAttempts;
}
public int getLoginDelayFactor() {
return loginDelayFactor;
}
public void setLoginDelayFactor(int loginDelayFactor) {
this.loginDelayFactor = loginDelayFactor;
}
public int getLoginDelayMaxTime() {
return loginDelayMaxTime;
}
public void setLoginDelayMaxTime(int loginDelayMaxTime) {
this.loginDelayMaxTime = loginDelayMaxTime;
}
public int getLoginDelayBase() {
return loginDelayBase;
}
public void setLoginDelayBase(int loginInitialDelay) {
this.loginDelayBase = loginInitialDelay;
}
public List<String> getAdminGroups() {
return adminGroups;
}
public void setAdminGroups(List<String> adminGroups) {
this.adminGroups = adminGroups;
}
public List<String> getAdminUsers() {
return adminUsers;
}
public void setAdminUsers(List<String> adminUsers) {
this.adminUsers = adminUsers;
}
public int getQueryMaxResultsLimit() {
return queryMaxResultsLimit;
}
public ProcessEngineConfigurationImpl setQueryMaxResultsLimit(int queryMaxResultsLimit) {
this.queryMaxResultsLimit = queryMaxResultsLimit;
return this;
}
public String getLoggingContextActivityId() {
return loggingContextActivityId;
}
public ProcessEngineConfigurationImpl setLoggingContextActivityId(String loggingContextActivityId) {
this.loggingContextActivityId = loggingContextActivityId;
return this;
}
public String getLoggingContextApplicationName() {
return loggingContextApplicationName;
}
public ProcessEngineConfigurationImpl setLoggingContextApplicationName(String loggingContextApplicationName) {
this.loggingContextApplicationName = loggingContextApplicationName;
return this;
}
public String getLoggingContextBusinessKey() {
return loggingContextBusinessKey;
}
public ProcessEngineConfigurationImpl setLoggingContextBusinessKey(String loggingContextBusinessKey) {
this.loggingContextBusinessKey = loggingContextBusinessKey;
return this;
}
public String getLoggingContextProcessDefinitionId() {
return loggingContextProcessDefinitionId;
}
public ProcessEngineConfigurationImpl setLoggingContextProcessDefinitionId(String loggingContextProcessDefinitionId) {
this.loggingContextProcessDefinitionId = loggingContextProcessDefinitionId;
return this;
}
public String getLoggingContextProcessInstanceId() {
return loggingContextProcessInstanceId;
}
public ProcessEngineConfigurationImpl setLoggingContextProcessInstanceId(String loggingContextProcessInstanceId) {
this.loggingContextProcessInstanceId = loggingContextProcessInstanceId;
return this;
}
public String getLoggingContextTenantId() {
return loggingContextTenantId;
}
public ProcessEngineConfigurationImpl setLoggingContextTenantId(String loggingContextTenantId) {
this.loggingContextTenantId = loggingContextTenantId;
return this;
}
}
| 1 | 9,664 | Please do not use wildcard imports. | camunda-camunda-bpm-platform | java |
@@ -386,6 +386,7 @@ struct wlr_surface *wlr_surface_create(struct wl_resource *res,
wl_resource_post_no_memory(res);
return NULL;
}
+ wlr_log(L_DEBUG, "New wlr_surface %p (res %p)", surface, res);
surface->renderer = renderer;
surface->texture = wlr_render_texture_create(renderer);
surface->resource = res; | 1 | #include <assert.h>
#include <stdlib.h>
#include <wayland-server.h>
#include <wlr/util/log.h>
#include <wlr/egl.h>
#include <wlr/render/interface.h>
#include <wlr/types/wlr_surface.h>
#include <wlr/render/matrix.h>
static void surface_destroy(struct wl_client *client, struct wl_resource *resource) {
wl_resource_destroy(resource);
}
static void surface_attach(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *buffer, int32_t sx, int32_t sy) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending.invalid |= WLR_SURFACE_INVALID_BUFFER;
surface->pending.buffer = buffer;
}
static void surface_damage(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width, int32_t height) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending.invalid |= WLR_SURFACE_INVALID_SURFACE_DAMAGE;
pixman_region32_union_rect(&surface->pending.surface_damage,
&surface->pending.surface_damage,
x, y, width, height);
}
static void destroy_frame_callback(struct wl_resource *resource) {
struct wlr_frame_callback *cb = wl_resource_get_user_data(resource);
wl_list_remove(&cb->link);
free(cb);
}
static void surface_frame(struct wl_client *client,
struct wl_resource *resource, uint32_t callback) {
struct wlr_frame_callback *cb;
struct wlr_surface *surface = wl_resource_get_user_data(resource);
cb = malloc(sizeof(struct wlr_frame_callback));
if (cb == NULL) {
wl_resource_post_no_memory(resource);
return;
}
cb->resource = wl_resource_create(client,
&wl_callback_interface, 1, callback);
if (cb->resource == NULL) {
free(cb);
wl_resource_post_no_memory(resource);
return;
}
wl_resource_set_implementation(cb->resource,
NULL, cb, destroy_frame_callback);
wl_list_insert(surface->frame_callback_list.prev, &cb->link);
}
static void surface_set_opaque_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if ((surface->pending.invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
pixman_region32_clear(&surface->pending.opaque);
}
surface->pending.invalid |= WLR_SURFACE_INVALID_OPAQUE_REGION;
if (region_resource) {
pixman_region32_t *region = wl_resource_get_user_data(region_resource);
pixman_region32_copy(&surface->pending.opaque, region);
} else {
pixman_region32_clear(&surface->pending.opaque);
}
}
static void surface_set_input_region(struct wl_client *client,
struct wl_resource *resource,
struct wl_resource *region_resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if ((surface->pending.invalid & WLR_SURFACE_INVALID_INPUT_REGION)) {
pixman_region32_clear(&surface->pending.input);
}
surface->pending.invalid |= WLR_SURFACE_INVALID_INPUT_REGION;
if (region_resource) {
pixman_region32_t *region = wl_resource_get_user_data(region_resource);
pixman_region32_copy(&surface->pending.input, region);
} else {
pixman_region32_init_rect(&surface->pending.input,
INT32_MIN, INT32_MIN, UINT32_MAX, UINT32_MAX);
}
}
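// Recomputes the surface's current width and height from the attached buffer size,
// dividing by the buffer scale and swapping the dimensions for 90/270-degree transforms.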
static void wlr_surface_update_size(struct wlr_surface *surface) {
int scale = surface->current.scale;
enum wl_output_transform transform = surface->current.transform;
wlr_texture_get_buffer_size(surface->texture, surface->current.buffer,
&surface->current.buffer_width, &surface->current.buffer_height);
int _width = surface->current.buffer_width / scale;
int _height = surface->current.buffer_height / scale;
if (transform == WL_OUTPUT_TRANSFORM_90 ||
transform == WL_OUTPUT_TRANSFORM_270 ||
transform == WL_OUTPUT_TRANSFORM_FLIPPED_90 ||
transform == WL_OUTPUT_TRANSFORM_FLIPPED_270) {
int tmp = _width;
_width = _height;
_height = tmp;
}
surface->current.width = _width;
surface->current.height = _height;
}
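// Converts a region in surface-local coordinates into buffer coordinates, applying the
// surface's output transform and buffer scale to every rectangle of the region.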
static void wlr_surface_to_buffer_region(struct wlr_surface *surface,
pixman_region32_t *surface_region, pixman_region32_t *buffer_region,
int width, int height) {
pixman_box32_t *src_rects, *dest_rects;
int nrects, i;
int scale = surface->current.scale;
enum wl_output_transform transform = surface->current.transform;
src_rects = pixman_region32_rectangles(surface_region, &nrects);
dest_rects = malloc(nrects * sizeof(*dest_rects));
if (!dest_rects) {
return;
}
for (i = 0; i < nrects; i++) {
switch (transform) {
default:
case WL_OUTPUT_TRANSFORM_NORMAL:
dest_rects[i].x1 = src_rects[i].x1;
dest_rects[i].y1 = src_rects[i].y1;
dest_rects[i].x2 = src_rects[i].x2;
dest_rects[i].y2 = src_rects[i].y2;
break;
case WL_OUTPUT_TRANSFORM_90:
dest_rects[i].x1 = height - src_rects[i].y2;
dest_rects[i].y1 = src_rects[i].x1;
dest_rects[i].x2 = height - src_rects[i].y1;
dest_rects[i].y2 = src_rects[i].x2;
break;
case WL_OUTPUT_TRANSFORM_180:
dest_rects[i].x1 = width - src_rects[i].x2;
dest_rects[i].y1 = height - src_rects[i].y2;
dest_rects[i].x2 = width - src_rects[i].x1;
dest_rects[i].y2 = height - src_rects[i].y1;
break;
case WL_OUTPUT_TRANSFORM_270:
dest_rects[i].x1 = src_rects[i].y1;
dest_rects[i].y1 = width - src_rects[i].x2;
dest_rects[i].x2 = src_rects[i].y2;
dest_rects[i].y2 = width - src_rects[i].x1;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED:
dest_rects[i].x1 = width - src_rects[i].x2;
dest_rects[i].y1 = src_rects[i].y1;
dest_rects[i].x2 = width - src_rects[i].x1;
dest_rects[i].y2 = src_rects[i].y2;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED_90:
dest_rects[i].x1 = height - src_rects[i].y2;
dest_rects[i].y1 = width - src_rects[i].x2;
dest_rects[i].x2 = height - src_rects[i].y1;
dest_rects[i].y2 = width - src_rects[i].x1;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED_180:
dest_rects[i].x1 = src_rects[i].x1;
dest_rects[i].y1 = height - src_rects[i].y2;
dest_rects[i].x2 = src_rects[i].x2;
dest_rects[i].y2 = height - src_rects[i].y1;
break;
case WL_OUTPUT_TRANSFORM_FLIPPED_270:
dest_rects[i].x1 = src_rects[i].y1;
dest_rects[i].y1 = src_rects[i].x1;
dest_rects[i].x2 = src_rects[i].y2;
dest_rects[i].y2 = src_rects[i].x2;
break;
}
}
if (scale != 1) {
for (i = 0; i < nrects; i++) {
dest_rects[i].x1 *= scale;
dest_rects[i].x2 *= scale;
dest_rects[i].y1 *= scale;
dest_rects[i].y2 *= scale;
}
}
pixman_region32_fini(buffer_region);
pixman_region32_init_rects(buffer_region, dest_rects, nrects);
free(dest_rects);
}
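// Applies the pending buffer, scale, transform and damage to the current state, recomputes
// the derived size and buffer damage, then emits the commit signal.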
static void surface_commit(struct wl_client *client,
struct wl_resource *resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
bool update_size = false;
bool update_damage = false;
if ((surface->pending.invalid & WLR_SURFACE_INVALID_SCALE)) {
surface->current.scale = surface->pending.scale;
update_size = true;
}
if ((surface->pending.invalid & WLR_SURFACE_INVALID_TRANSFORM)) {
surface->current.transform = surface->pending.transform;
update_size = true;
}
if ((surface->pending.invalid & WLR_SURFACE_INVALID_BUFFER)) {
surface->current.buffer = surface->pending.buffer;
update_size = true;
}
if (update_size) {
int32_t oldw = surface->current.buffer_width;
int32_t oldh = surface->current.buffer_height;
wlr_surface_update_size(surface);
surface->reupload_buffer = oldw != surface->current.buffer_width ||
oldh != surface->current.buffer_height;
}
if ((surface->pending.invalid & WLR_SURFACE_INVALID_SURFACE_DAMAGE)) {
pixman_region32_union(&surface->current.surface_damage,
&surface->current.surface_damage,
&surface->pending.surface_damage);
pixman_region32_intersect_rect(&surface->current.surface_damage,
&surface->current.surface_damage, 0, 0, surface->current.width,
surface->current.height);
pixman_region32_clear(&surface->pending.surface_damage);
update_damage = true;
}
if ((surface->pending.invalid & WLR_SURFACE_INVALID_BUFFER_DAMAGE)) {
pixman_region32_union(&surface->current.buffer_damage,
&surface->current.buffer_damage,
&surface->pending.buffer_damage);
pixman_region32_clear(&surface->pending.buffer_damage);
update_damage = true;
}
if (update_damage) {
pixman_region32_t buffer_damage;
pixman_region32_init(&buffer_damage);
wlr_surface_to_buffer_region(surface, &surface->current.surface_damage,
&buffer_damage, surface->current.width, surface->current.height);
pixman_region32_union(&surface->current.buffer_damage,
&surface->current.buffer_damage, &buffer_damage);
pixman_region32_fini(&buffer_damage);
pixman_region32_intersect_rect(&surface->current.buffer_damage,
&surface->current.buffer_damage, 0, 0,
surface->current.buffer_width, surface->current.buffer_height);
}
if ((surface->pending.invalid & WLR_SURFACE_INVALID_OPAQUE_REGION)) {
// TODO: process buffer
pixman_region32_clear(&surface->pending.opaque);
}
if ((surface->pending.invalid & WLR_SURFACE_INVALID_INPUT_REGION)) {
// TODO: process buffer
pixman_region32_clear(&surface->pending.input);
}
surface->pending.invalid = 0;
// TODO: add the invalid bitfield to this callback
wl_signal_emit(&surface->signals.commit, surface);
}
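// Uploads the committed buffer to the surface texture (full re-upload on resize, otherwise
// only the damaged rectangles for SHM buffers; DRM buffers are attached directly), then
// clears the accumulated damage and releases the buffer.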
void wlr_surface_flush_damage(struct wlr_surface *surface) {
if (!surface->current.buffer) {
if (surface->texture->valid) {
// TODO: Detach buffers
}
return;
}
struct wl_shm_buffer *buffer = wl_shm_buffer_get(surface->current.buffer);
if (!buffer) {
if (wlr_renderer_buffer_is_drm(surface->renderer, surface->pending.buffer)) {
wlr_texture_upload_drm(surface->texture, surface->pending.buffer);
goto release;
} else {
wlr_log(L_INFO, "Unknown buffer handle attached");
return;
}
}
uint32_t format = wl_shm_buffer_get_format(buffer);
if (surface->reupload_buffer) {
wlr_texture_upload_shm(surface->texture, format, buffer);
} else {
pixman_region32_t damage = surface->current.buffer_damage;
if (!pixman_region32_not_empty(&damage)) {
goto release;
}
int n;
pixman_box32_t *rects = pixman_region32_rectangles(&damage, &n);
for (int i = 0; i < n; ++i) {
pixman_box32_t rect = rects[i];
if (!wlr_texture_update_shm(surface->texture, format,
rect.x1, rect.y1,
rect.x2 - rect.x1,
rect.y2 - rect.y1,
buffer)) {
break;
}
}
}
release:
pixman_region32_clear(&surface->current.surface_damage);
pixman_region32_clear(&surface->current.buffer_damage);
wl_resource_post_event(surface->current.buffer, WL_BUFFER_RELEASE);
surface->current.buffer = NULL;
}
static void surface_set_buffer_transform(struct wl_client *client,
struct wl_resource *resource, int transform) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending.invalid |= WLR_SURFACE_INVALID_TRANSFORM;
surface->pending.transform = transform;
}
static void surface_set_buffer_scale(struct wl_client *client,
struct wl_resource *resource,
int32_t scale) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
surface->pending.invalid |= WLR_SURFACE_INVALID_SCALE;
surface->pending.scale = scale;
}
static void surface_damage_buffer(struct wl_client *client,
struct wl_resource *resource,
int32_t x, int32_t y, int32_t width,
int32_t height) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
if (width < 0 || height < 0) {
return;
}
surface->pending.invalid |= WLR_SURFACE_INVALID_BUFFER_DAMAGE;
pixman_region32_union_rect(&surface->pending.buffer_damage,
&surface->pending.buffer_damage,
x, y, width, height);
}
const struct wl_surface_interface surface_interface = {
.destroy = surface_destroy,
.attach = surface_attach,
.damage = surface_damage,
.frame = surface_frame,
.set_opaque_region = surface_set_opaque_region,
.set_input_region = surface_set_input_region,
.commit = surface_commit,
.set_buffer_transform = surface_set_buffer_transform,
.set_buffer_scale = surface_set_buffer_scale,
.damage_buffer = surface_damage_buffer
};
static void destroy_surface(struct wl_resource *resource) {
struct wlr_surface *surface = wl_resource_get_user_data(resource);
wlr_texture_destroy(surface->texture);
struct wlr_frame_callback *cb, *next;
wl_list_for_each_safe(cb, next, &surface->frame_callback_list, link) {
wl_resource_destroy(cb->resource);
}
pixman_region32_fini(&surface->pending.surface_damage);
pixman_region32_fini(&surface->pending.buffer_damage);
pixman_region32_fini(&surface->pending.opaque);
pixman_region32_fini(&surface->pending.input);
free(surface);
}
struct wlr_surface *wlr_surface_create(struct wl_resource *res,
struct wlr_renderer *renderer) {
struct wlr_surface *surface;
if (!(surface = calloc(1, sizeof(struct wlr_surface)))) {
wl_resource_post_no_memory(res);
return NULL;
}
surface->renderer = renderer;
surface->texture = wlr_render_texture_create(renderer);
surface->resource = res;
surface->current.scale = 1;
surface->pending.scale = 1;
surface->current.transform = WL_OUTPUT_TRANSFORM_NORMAL;
surface->pending.transform = WL_OUTPUT_TRANSFORM_NORMAL;
pixman_region32_init(&surface->pending.surface_damage);
pixman_region32_init(&surface->pending.buffer_damage);
pixman_region32_init(&surface->pending.opaque);
pixman_region32_init(&surface->pending.input);
wl_signal_init(&surface->signals.commit);
wl_list_init(&surface->frame_callback_list);
wl_resource_set_implementation(res, &surface_interface,
surface, destroy_surface);
return surface;
}
void wlr_surface_get_matrix(struct wlr_surface *surface,
float (*matrix)[16],
const float (*projection)[16],
const float (*transform)[16]) {
int width = surface->texture->width / surface->current.scale;
int height = surface->texture->height / surface->current.scale;
float scale[16];
wlr_matrix_identity(matrix);
if (transform) {
wlr_matrix_mul(matrix, transform, matrix);
}
wlr_matrix_scale(&scale, width, height, 1);
wlr_matrix_mul(matrix, &scale, matrix);
wlr_matrix_mul(projection, matrix, matrix);
}
| 1 | 7,850 | Not sure about these added logs, as said in the commit message it's probably not something we want all the time, but it helped me debug a bit. | swaywm-wlroots | c |
@@ -487,8 +487,6 @@ class Comparison(ComparisonInterface):
paths2 = el2.split()
if len(paths1) != len(paths2):
raise cls.failureException("%s objects do not have a matching number of paths." % msg)
- for p1, p2 in zip(paths1, paths2):
- cls.compare_dataset(p1, p2, '%s data' % msg)
@classmethod
def compare_contours(cls, el1, el2, msg='Contours'): | 1 | """
Helper classes for comparing the equality of two HoloViews objects.
These classes are designed to integrate with unittest.TestCase (see
the tests directory) while making equality testing easily accessible
to the user.
For instance, to test if two Matrix objects are equal you can use:
Comparison.assertEqual(matrix1, matrix2)
This will raise an AssertionError if the two matrix objects are not
equal, including information regarding what exactly failed to match.
Note that this functionality could not be provided using comparison
methods on all objects as comparison operators only return Booleans and
thus would not supply any information regarding *why* two elements are
considered different.
"""
from functools import partial
import numpy as np
from unittest.util import safe_repr
from unittest import TestCase
from numpy.testing import assert_array_equal, assert_array_almost_equal
from . import * # noqa (All Elements need to support comparison)
from ..core import (Element, Empty, AdjointLayout, Overlay, Dimension,
HoloMap, Dimensioned, Layout, NdLayout, NdOverlay,
GridSpace, DynamicMap, GridMatrix, OrderedDict)
from ..core.options import Options, Cycle
from ..core.util import pd, datetime_types, dt_to_int
class ComparisonInterface(object):
"""
This class is designed to allow equality testing to work
seamlessly with unittest.TestCase as a mix-in by implementing a
compatible interface (namely the assertEqual method).
The assertEqual class method is to be overridden by an instance
method of the same name when used as a mix-in with TestCase. The
    contents of the equality_type_funcs dictionary are suitable for use
with TestCase.addTypeEqualityFunc.
"""
equality_type_funcs = {}
failureException = AssertionError
@classmethod
def simple_equality(cls, first, second, msg=None):
"""
Classmethod equivalent to unittest.TestCase method (longMessage = False.)
"""
if not first==second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
raise cls.failureException(msg or standardMsg)
@classmethod
def assertEqual(cls, first, second, msg=None):
"""
Classmethod equivalent to unittest.TestCase method
"""
asserter = None
if type(first) is type(second):
asserter = cls.equality_type_funcs.get(type(first))
try: basestring = basestring # Python 2
except NameError: basestring = str # Python 3
if asserter is not None:
if isinstance(asserter, basestring):
asserter = getattr(cls, asserter)
if asserter is None:
asserter = cls.simple_equality
if msg is None:
asserter(first, second)
else:
asserter(first, second, msg=msg)
class Comparison(ComparisonInterface):
"""
Class used for comparing two HoloViews objects, including complex
composite objects. Comparisons are available as classmethods, the
most general being the assertEqual method that is intended to work
with any input.
For instance, to test if two Image objects are equal you can use:
Comparison.assertEqual(matrix1, matrix2)
"""
# someone might prefer to use a different function, e.g. assert_all_close
assert_array_almost_equal_fn = partial(assert_array_almost_equal, decimal=6)
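    # A hedged sketch of how one might swap in a different comparison function,
    # as the note above suggests; assert_allclose and the rtol value here are
    # illustrative assumptions, not defaults of this module:
    #
    #     from numpy.testing import assert_allclose
    #     Comparison.assert_array_almost_equal_fn = partial(assert_allclose, rtol=1e-3)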
@classmethod
def register(cls):
# Float comparisons
cls.equality_type_funcs[float] = cls.compare_floats
cls.equality_type_funcs[np.float] = cls.compare_floats
cls.equality_type_funcs[np.float32] = cls.compare_floats
cls.equality_type_funcs[np.float64] = cls.compare_floats
# List and tuple comparisons
cls.equality_type_funcs[list] = cls.compare_lists
cls.equality_type_funcs[tuple] = cls.compare_tuples
# Dictionary comparisons
cls.equality_type_funcs[dict] = cls.compare_dictionaries
cls.equality_type_funcs[OrderedDict] = cls.compare_dictionaries
# Numpy array comparison
cls.equality_type_funcs[np.ndarray] = cls.compare_arrays
cls.equality_type_funcs[np.ma.masked_array] = cls.compare_arrays
# Pandas dataframe comparison
if pd:
cls.equality_type_funcs[pd.DataFrame] = cls.compare_dataframe
# Dimension objects
cls.equality_type_funcs[Dimension] = cls.compare_dimensions
cls.equality_type_funcs[Dimensioned] = cls.compare_dimensioned # Used in unit tests
cls.equality_type_funcs[Element] = cls.compare_elements # Used in unit tests
# Composition (+ and *)
cls.equality_type_funcs[Overlay] = cls.compare_overlays
cls.equality_type_funcs[Layout] = cls.compare_layouttrees
cls.equality_type_funcs[Empty] = cls.compare_empties
# Annotations
cls.equality_type_funcs[VLine] = cls.compare_vline
cls.equality_type_funcs[HLine] = cls.compare_hline
cls.equality_type_funcs[VSpan] = cls.compare_vspan
cls.equality_type_funcs[HSpan] = cls.compare_hspan
cls.equality_type_funcs[Spline] = cls.compare_spline
cls.equality_type_funcs[Arrow] = cls.compare_arrow
cls.equality_type_funcs[Text] = cls.compare_text
cls.equality_type_funcs[Div] = cls.compare_div
# Path comparisons
cls.equality_type_funcs[Path] = cls.compare_paths
cls.equality_type_funcs[Contours] = cls.compare_contours
cls.equality_type_funcs[Polygons] = cls.compare_polygons
cls.equality_type_funcs[Box] = cls.compare_box
cls.equality_type_funcs[Ellipse] = cls.compare_ellipse
cls.equality_type_funcs[Bounds] = cls.compare_bounds
# Rasters
cls.equality_type_funcs[Image] = cls.compare_image
cls.equality_type_funcs[RGB] = cls.compare_rgb
cls.equality_type_funcs[HSV] = cls.compare_hsv
cls.equality_type_funcs[Raster] = cls.compare_raster
cls.equality_type_funcs[QuadMesh] = cls.compare_quadmesh
cls.equality_type_funcs[Surface] = cls.compare_surface
cls.equality_type_funcs[HeatMap] = cls.compare_dataset
# Charts
cls.equality_type_funcs[Dataset] = cls.compare_dataset
cls.equality_type_funcs[Curve] = cls.compare_curve
cls.equality_type_funcs[ErrorBars] = cls.compare_errorbars
cls.equality_type_funcs[Spread] = cls.compare_spread
cls.equality_type_funcs[Area] = cls.compare_area
cls.equality_type_funcs[Scatter] = cls.compare_scatter
cls.equality_type_funcs[Scatter3D] = cls.compare_scatter3d
cls.equality_type_funcs[TriSurface] = cls.compare_trisurface
cls.equality_type_funcs[Trisurface] = cls.compare_trisurface
cls.equality_type_funcs[Histogram] = cls.compare_histogram
cls.equality_type_funcs[Bars] = cls.compare_bars
cls.equality_type_funcs[Spikes] = cls.compare_spikes
cls.equality_type_funcs[BoxWhisker] = cls.compare_boxwhisker
cls.equality_type_funcs[VectorField] = cls.compare_vectorfield
# Graphs
cls.equality_type_funcs[Graph] = cls.compare_graph
cls.equality_type_funcs[Nodes] = cls.compare_nodes
cls.equality_type_funcs[EdgePaths] = cls.compare_edgepaths
cls.equality_type_funcs[TriMesh] = cls.compare_trimesh
# Tables
cls.equality_type_funcs[ItemTable] = cls.compare_itemtables
cls.equality_type_funcs[Table] = cls.compare_tables
cls.equality_type_funcs[Points] = cls.compare_points
# Statistical
cls.equality_type_funcs[Bivariate] = cls.compare_bivariate
cls.equality_type_funcs[Distribution] = cls.compare_distribution
cls.equality_type_funcs[HexTiles] = cls.compare_hextiles
# NdMappings
cls.equality_type_funcs[NdLayout] = cls.compare_gridlayout
cls.equality_type_funcs[AdjointLayout] = cls.compare_adjointlayouts
cls.equality_type_funcs[NdOverlay] = cls.compare_ndoverlays
cls.equality_type_funcs[GridSpace] = cls.compare_grids
cls.equality_type_funcs[GridMatrix] = cls.compare_grids
cls.equality_type_funcs[HoloMap] = cls.compare_holomap
cls.equality_type_funcs[DynamicMap] = cls.compare_dynamicmap
# Option objects
cls.equality_type_funcs[Options] = cls.compare_options
cls.equality_type_funcs[Cycle] = cls.compare_cycles
return cls.equality_type_funcs
@classmethod
def compare_dictionaries(cls, d1, d2, msg='Dictionaries'):
keys= set(d1.keys())
keys2 = set(d2.keys())
symmetric_diff = keys ^ keys2
if symmetric_diff:
msg = ("Dictionaries have different sets of keys: %r\n\n"
% symmetric_diff)
msg += "Dictionary 1: %s\n" % d1
msg += "Dictionary 2: %s" % d2
raise cls.failureException(msg)
for k in keys:
cls.assertEqual(d1[k], d2[k])
@classmethod
def compare_lists(cls, l1, l2, msg=None):
try:
cls.assertEqual(len(l1), len(l2))
for v1, v2 in zip(l1, l2):
cls.assertEqual(v1, v2)
except AssertionError:
raise AssertionError(msg or '%s != %s' % (repr(l1), repr(l2)))
@classmethod
def compare_tuples(cls, t1, t2, msg=None):
try:
cls.assertEqual(len(t1), len(t2))
for i1, i2 in zip(t1, t2):
cls.assertEqual(i1, i2)
except AssertionError:
raise AssertionError(msg or '%s != %s' % (repr(t1), repr(t2)))
#=====================#
# Literal comparisons #
#=====================#
@classmethod
def compare_floats(cls, arr1, arr2, msg='Floats'):
cls.compare_arrays(arr1, arr2, msg)
@classmethod
def compare_arrays(cls, arr1, arr2, msg='Arrays'):
try:
if arr1.dtype.kind == 'M':
arr1 = arr1.astype('datetime64[ns]').astype('int64')
if arr2.dtype.kind == 'M':
arr2 = arr2.astype('datetime64[ns]').astype('int64')
assert_array_equal(arr1, arr2)
except:
try:
cls.assert_array_almost_equal_fn(arr1, arr2)
except AssertionError as e:
raise cls.failureException(msg + str(e)[11:])
@classmethod
def bounds_check(cls, el1, el2, msg=None):
lbrt1 = el1.bounds.lbrt()
lbrt2 = el2.bounds.lbrt()
try:
for v1, v2 in zip(lbrt1, lbrt2):
if isinstance(v1, datetime_types):
v1 = dt_to_int(v1)
if isinstance(v2, datetime_types):
v2 = dt_to_int(v2)
cls.assert_array_almost_equal_fn(v1, v2)
except AssertionError:
raise cls.failureException("BoundingBoxes are mismatched: %s != %s."
% (el1.bounds.lbrt(), el2.bounds.lbrt()))
#=======================================#
# Dimension and Dimensioned comparisons #
#=======================================#
@classmethod
def compare_dimensions(cls, dim1, dim2, msg=None):
# 'Weak' equality semantics
if dim1.name != dim2.name:
raise cls.failureException("Dimension names mismatched: %s != %s"
% (dim1.name, dim2.name))
if dim1.label != dim2.label:
raise cls.failureException("Dimension labels mismatched: %s != %s"
% (dim1.label, dim2.label))
# 'Deep' equality of dimension metadata (all parameters)
dim1_params = dict(dim1.get_param_values())
dim2_params = dict(dim2.get_param_values())
# Special handling of deprecated 'initial' values argument
dim1_params['values'] = [] if dim1.values=='initial' else dim1.values
dim2_params['values'] = [] if dim2.values=='initial' else dim2.values
if set(dim1_params.keys()) != set(dim2_params.keys()):
raise cls.failureException("Dimension parameter sets mismatched: %s != %s"
% (set(dim1_params.keys()), set(dim2_params.keys())))
for k in dim1_params.keys():
if (dim1.params(k).__class__.__name__ == 'Callable'
and dim2.params(k).__class__.__name__ == 'Callable'):
continue
try: # This is needed as two lists are not compared by contents using ==
cls.assertEqual(dim1_params[k], dim2_params[k], msg=None)
except AssertionError as e:
msg = 'Dimension parameter %r mismatched: ' % k
raise cls.failureException("%s%s" % (msg, str(e)))
@classmethod
def compare_labelled_data(cls, obj1, obj2, msg=None):
cls.assertEqual(obj1.group, obj2.group, "Group labels mismatched.")
cls.assertEqual(obj1.label, obj2.label, "Labels mismatched.")
@classmethod
def compare_dimension_lists(cls, dlist1, dlist2, msg='Dimension lists'):
if len(dlist1) != len(dlist2):
raise cls.failureException('%s mismatched' % msg)
for d1, d2 in zip(dlist1, dlist2):
cls.assertEqual(d1, d2)
@classmethod
def compare_dimensioned(cls, obj1, obj2, msg=None):
cls.compare_labelled_data(obj1, obj2)
cls.compare_dimension_lists(obj1.vdims, obj2.vdims,
'Value dimension list')
cls.compare_dimension_lists(obj1.kdims, obj2.kdims,
'Key dimension list')
@classmethod
def compare_elements(cls, obj1, obj2, msg=None):
cls.compare_labelled_data(obj1, obj2)
cls.assertEqual(obj1.data, obj2.data)
#===============================#
# Compositional trees (+ and *) #
#===============================#
@classmethod
def compare_trees(cls, el1, el2, msg='Trees'):
if len(el1.keys()) != len(el2.keys()):
raise cls.failureException("%s have mismatched path counts." % msg)
if el1.keys() != el2.keys():
raise cls.failureException("%s have mismatched paths." % msg)
for element1, element2 in zip(el1.values(), el2.values()):
cls.assertEqual(element1, element2)
@classmethod
def compare_layouttrees(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
cls.compare_trees(el1, el2, msg='Layouts')
@classmethod
def compare_empties(cls, el1, el2, msg=None):
if not all(isinstance(el, Empty) for el in [el1, el2]):
raise cls.failureException("Compared elements are not both Empty()")
@classmethod
def compare_overlays(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
cls.compare_trees(el1, el2, msg='Overlays')
#================================#
# AttrTree and Map based classes #
#================================#
@classmethod
def compare_ndmappings(cls, el1, el2, msg='NdMappings'):
cls.compare_dimensioned(el1, el2)
if len(el1.keys()) != len(el2.keys()):
raise cls.failureException("%s have different numbers of keys." % msg)
if set(el1.keys()) != set(el2.keys()):
diff1 = [el for el in el1.keys() if el not in el2.keys()]
diff2 = [el for el in el2.keys() if el not in el1.keys()]
raise cls.failureException("%s have different sets of keys. " % msg
+ "In first, not second %s. " % diff1
+ "In second, not first: %s." % diff2)
for element1, element2 in zip(el1, el2):
cls.assertEqual(element1, element2)
@classmethod
def compare_holomap(cls, el1, el2, msg='HoloMaps'):
cls.compare_dimensioned(el1, el2)
cls.compare_ndmappings(el1, el2, msg)
@classmethod
def compare_dynamicmap(cls, el1, el2, msg='DynamicMap'):
cls.compare_dimensioned(el1, el2)
cls.compare_ndmappings(el1, el2, msg)
@classmethod
def compare_gridlayout(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
if len(el1) != len(el2):
raise cls.failureException("Layouts have different sizes.")
if set(el1.keys()) != set(el2.keys()):
raise cls.failureException("Layouts have different keys.")
for element1, element2 in zip(el1, el2):
cls.assertEqual(element1,element2)
@classmethod
def compare_ndoverlays(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
if len(el1) != len(el2):
raise cls.failureException("NdOverlays have different lengths.")
for (layer1, layer2) in zip(el1, el2):
cls.assertEqual(layer1, layer2)
@classmethod
def compare_adjointlayouts(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
        for element1, element2 in zip(el1, el2):
cls.assertEqual(element1, element2)
#=============#
# Annotations #
#=============#
@classmethod
def compare_annotation(cls, el1, el2, msg='Annotation'):
cls.compare_dimensioned(el1, el2)
cls.assertEqual(el1.data, el2.data)
@classmethod
def compare_hline(cls, el1, el2, msg='HLine'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_vline(cls, el1, el2, msg='VLine'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_vspan(cls, el1, el2, msg='VSpan'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_hspan(cls, el1, el2, msg='HSpan'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_spline(cls, el1, el2, msg='Spline'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_arrow(cls, el1, el2, msg='Arrow'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_text(cls, el1, el2, msg='Text'):
cls.compare_annotation(el1, el2, msg=msg)
@classmethod
def compare_div(cls, el1, el2, msg='Div'):
cls.compare_annotation(el1, el2, msg=msg)
#=======#
# Paths #
#=======#
@classmethod
def compare_paths(cls, el1, el2, msg='Path'):
cls.compare_dataset(el1, el2, msg)
paths1 = el1.split()
paths2 = el2.split()
if len(paths1) != len(paths2):
raise cls.failureException("%s objects do not have a matching number of paths." % msg)
for p1, p2 in zip(paths1, paths2):
cls.compare_dataset(p1, p2, '%s data' % msg)
@classmethod
def compare_contours(cls, el1, el2, msg='Contours'):
cls.compare_paths(el1, el2, msg=msg)
@classmethod
def compare_polygons(cls, el1, el2, msg='Polygons'):
cls.compare_paths(el1, el2, msg=msg)
@classmethod
def compare_box(cls, el1, el2, msg='Box'):
cls.compare_paths(el1, el2, msg=msg)
@classmethod
def compare_ellipse(cls, el1, el2, msg='Ellipse'):
cls.compare_paths(el1, el2, msg=msg)
@classmethod
def compare_bounds(cls, el1, el2, msg='Bounds'):
cls.compare_paths(el1, el2, msg=msg)
#========#
# Charts #
#========#
@classmethod
def compare_dataset(cls, el1, el2, msg='Dataset'):
cls.compare_dimensioned(el1, el2)
if el1.shape[0] != el2.shape[0]:
raise AssertionError("%s not of matching length." % msg)
dimension_data = [(d, el1[d], el2[d]) for d in el1.dimensions()]
        for dim, d1, d2 in dimension_data:
            if d1.dtype != d2.dtype:
                raise cls.failureException(
                    "%s %s columns have different type." % (msg, dim.pprint_label)
                    + " First has type %s, and second has type %s."
                    % (d1.dtype, d2.dtype))
            if d1.dtype.kind in 'SUOV':
                if list(d1) != list(d2):
                    raise cls.failureException("%s along dimension %s not equal." %
                                               (msg, dim.pprint_label))
            else:
                cls.compare_arrays(d1, d2, msg)
@classmethod
def compare_curve(cls, el1, el2, msg='Curve'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_errorbars(cls, el1, el2, msg='ErrorBars'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_spread(cls, el1, el2, msg='Spread'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_area(cls, el1, el2, msg='Area'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_scatter(cls, el1, el2, msg='Scatter'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_scatter3d(cls, el1, el2, msg='Scatter3D'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_trisurface(cls, el1, el2, msg='TriSurface'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_histogram(cls, el1, el2, msg='Histogram'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_points(cls, el1, el2, msg='Points'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_vectorfield(cls, el1, el2, msg='VectorField'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_bars(cls, el1, el2, msg='Bars'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_spikes(cls, el1, el2, msg='Spikes'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_boxwhisker(cls, el1, el2, msg='BoxWhisker'):
cls.compare_dataset(el1, el2, msg)
#=========#
# Graphs #
#=========#
@classmethod
def compare_graph(cls, el1, el2, msg='Graph'):
cls.compare_dataset(el1, el2, msg)
cls.compare_nodes(el1.nodes, el2.nodes, msg)
if el1._edgepaths or el2._edgepaths:
cls.compare_edgepaths(el1.edgepaths, el2.edgepaths, msg)
@classmethod
def compare_trimesh(cls, el1, el2, msg='TriMesh'):
cls.compare_graph(el1, el2, msg)
@classmethod
def compare_nodes(cls, el1, el2, msg='Nodes'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_edgepaths(cls, el1, el2, msg='Nodes'):
cls.compare_paths(el1, el2, msg)
#=========#
# Rasters #
#=========#
@classmethod
def compare_raster(cls, el1, el2, msg='Raster'):
cls.compare_dimensioned(el1, el2)
cls.compare_arrays(el1.data, el2.data, msg)
@classmethod
def compare_quadmesh(cls, el1, el2, msg='QuadMesh'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_heatmap(cls, el1, el2, msg='HeatMap'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_image(cls, el1, el2, msg='Image'):
cls.bounds_check(el1,el2)
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_rgb(cls, el1, el2, msg='RGB'):
cls.bounds_check(el1,el2)
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_hsv(cls, el1, el2, msg='HSV'):
cls.bounds_check(el1,el2)
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_surface(cls, el1, el2, msg='Surface'):
cls.bounds_check(el1,el2)
cls.compare_dataset(el1, el2, msg)
#========#
# Tables #
#========#
@classmethod
def compare_itemtables(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
if el1.rows != el2.rows:
raise cls.failureException("ItemTables have different numbers of rows.")
if el1.cols != el2.cols:
raise cls.failureException("ItemTables have different numbers of columns.")
if [d.name for d in el1.vdims] != [d.name for d in el2.vdims]:
raise cls.failureException("ItemTables have different Dimensions.")
@classmethod
def compare_tables(cls, el1, el2, msg='Table'):
cls.compare_dataset(el1, el2, msg)
#========#
# Pandas #
#========#
@classmethod
def compare_dataframe(cls, df1, df2, msg='DFrame'):
from pandas.util.testing import assert_frame_equal
try:
assert_frame_equal(df1, df2)
except AssertionError as e:
raise cls.failureException(msg+': '+str(e))
#============#
# Statistics #
#============#
@classmethod
def compare_distribution(cls, el1, el2, msg='Distribution'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_bivariate(cls, el1, el2, msg='Bivariate'):
cls.compare_dataset(el1, el2, msg)
@classmethod
def compare_hextiles(cls, el1, el2, msg='HexTiles'):
cls.compare_dataset(el1, el2, msg)
#=======#
# Grids #
#=======#
@classmethod
def _compare_grids(cls, el1, el2, name):
if len(el1.keys()) != len(el2.keys()):
raise cls.failureException("%ss have different numbers of items." % name)
if set(el1.keys()) != set(el2.keys()):
raise cls.failureException("%ss have different keys." % name)
if len(el1) != len(el2):
raise cls.failureException("%ss have different depths." % name)
for element1, element2 in zip(el1, el2):
cls.assertEqual(element1, element2)
@classmethod
def compare_grids(cls, el1, el2, msg=None):
cls.compare_dimensioned(el1, el2)
cls._compare_grids(el1, el2, 'GridSpace')
#=========#
# Options #
#=========#
@classmethod
def compare_options(cls, options1, options2, msg=None):
cls.assertEqual(options1.kwargs, options2.kwargs)
@classmethod
def compare_cycles(cls, cycle1, cycle2, msg=None):
cls.assertEqual(cycle1.values, cycle2.values)
@classmethod
def compare_channelopts(cls, opt1, opt2, msg=None):
cls.assertEqual(opt1.mode, opt2.mode)
        cls.assertEqual(opt1.pattern, opt2.pattern)
class ComparisonTestCase(Comparison, TestCase):
"""
Class to integrate the Comparison class with unittest.TestCase.
"""
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
registry = Comparison.register()
for k, v in registry.items():
self.addTypeEqualityFunc(k, v)
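# Hypothetical example of the intended integration with unittest (the test class
# and method names are assumptions): subclassing ComparisonTestCase makes
# assertEqual dispatch to the element-aware comparison functions registered above.
#
#     class ExampleComparisonTests(ComparisonTestCase):
#         def test_identical_curves_are_equal(self):
#             xs = np.linspace(0, 1, 10)
#             self.assertEqual(Curve((xs, xs)), Curve((xs, xs)))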
| 1 | 23,378 | Should there be an equivalent check or is it ok to remove this comparison? | holoviz-holoviews | py |
@@ -57,7 +57,7 @@ def run_migrations_online():
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
- # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
+ # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0] | 1 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url',
current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: http://alembic.readthedocs.org/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
logger.info('No changes in schema detected.')
engine = engine_from_config(config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(connection=connection,
target_metadata=target_metadata,
process_revision_directives=process_revision_directives,
**current_app.extensions['migrate'].configure_args)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| 1 | 15,634 | sorry, why this change? | quiltdata-quilt | py |
@@ -82,7 +82,7 @@ public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase {
*
* @throws Exception When analysis fails.
*/
- protected abstract NamedList doAnalysis(SolrQueryRequest req) throws Exception;
+ protected abstract NamedList<?> doAnalysis(SolrQueryRequest req) throws Exception;
/**
* Analyzes the given value using the given Analyzer. | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.util.IOUtils;
import org.apache.solr.analysis.TokenizerChain;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.schema.FieldType;
/**
* A base class for all analysis request handlers.
*
*
* @since solr 1.4
*/
public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase {
public static final Set<BytesRef> EMPTY_BYTES_SET = Collections.emptySet();
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
rsp.add("analysis", doAnalysis(req));
}
/**
* Performs the analysis based on the given solr request and returns the analysis result as a named list.
*
* @param req The solr request.
*
* @return The analysis result as a named list.
*
* @throws Exception When analysis fails.
*/
protected abstract NamedList doAnalysis(SolrQueryRequest req) throws Exception;
/**
* Analyzes the given value using the given Analyzer.
*
* @param value Value to analyze
* @param context The {@link AnalysisContext analysis context}.
*
* @return NamedList containing the tokens produced by analyzing the given value
*/
protected NamedList<? extends Object> analyzeValue(String value, AnalysisContext context) {
Analyzer analyzer = context.getAnalyzer();
if (!TokenizerChain.class.isInstance(analyzer)) {
try (TokenStream tokenStream = analyzer.tokenStream(context.getFieldName(), value)) {
NamedList<List<NamedList>> namedList = new NamedList<>();
namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(analyzeTokenStream(tokenStream), context));
return namedList;
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
}
TokenizerChain tokenizerChain = (TokenizerChain) analyzer;
CharFilterFactory[] cfiltfacs = tokenizerChain.getCharFilterFactories();
TokenizerFactory tfac = tokenizerChain.getTokenizerFactory();
TokenFilterFactory[] filtfacs = tokenizerChain.getTokenFilterFactories();
NamedList<Object> namedList = new NamedList<>();
if (0 < cfiltfacs.length) {
String source = value;
for(CharFilterFactory cfiltfac : cfiltfacs ){
try (Reader sreader = new StringReader(source);
Reader reader = cfiltfac.create(sreader)) {
source = writeCharStream(namedList, reader);
} catch (IOException e) {
// do nothing.
}
}
}
TokenStream tokenStream = tfac.create();
((Tokenizer)tokenStream).setReader(tokenizerChain.initReader(null, new StringReader(value)));
List<AttributeSource> tokens = analyzeTokenStream(tokenStream);
namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(tokens, context));
ListBasedTokenStream listBasedTokenStream = new ListBasedTokenStream(tokenStream, tokens);
for (TokenFilterFactory tokenFilterFactory : filtfacs) {
for (final AttributeSource tok : tokens) {
tok.getAttribute(TokenTrackingAttribute.class).freezeStage();
}
// overwrite the vars "tokenStream", "tokens", and "listBasedTokenStream"
tokenStream = tokenFilterFactory.create(listBasedTokenStream);
tokens = analyzeTokenStream(tokenStream);
namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(tokens, context));
try {
listBasedTokenStream.close();
} catch (IOException e) {
// do nothing;
}
listBasedTokenStream = new ListBasedTokenStream(listBasedTokenStream, tokens);
}
try {
listBasedTokenStream.close();
} catch (IOException e) {
// do nothing.
}
return namedList;
}
/**
* Analyzes the given text using the given analyzer and returns the produced tokens.
*
* @param query The query to analyze.
* @param analyzer The analyzer to use.
*/
protected Set<BytesRef> getQueryTokenSet(String query, Analyzer analyzer) {
try (TokenStream tokenStream = analyzer.tokenStream("", query)){
final Set<BytesRef> tokens = new HashSet<>();
final TermToBytesRefAttribute bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
tokenStream.reset();
while (tokenStream.incrementToken()) {
tokens.add(BytesRef.deepCopyOf(bytesAtt.getBytesRef()));
}
tokenStream.end();
return tokens;
} catch (IOException ioe) {
throw new RuntimeException("Error occurred while iterating over tokenstream", ioe);
}
}
/**
* Analyzes the given TokenStream, collecting the Tokens it produces.
*
* @param tokenStream TokenStream to analyze
*
* @return List of tokens produced from the TokenStream
*/
private List<AttributeSource> analyzeTokenStream(TokenStream tokenStream) {
final List<AttributeSource> tokens = new ArrayList<>();
final PositionIncrementAttribute posIncrAtt = tokenStream.addAttribute(PositionIncrementAttribute.class);
final TokenTrackingAttribute trackerAtt = tokenStream.addAttribute(TokenTrackingAttribute.class);
// for backwards compatibility, add all "common" attributes
tokenStream.addAttribute(OffsetAttribute.class);
tokenStream.addAttribute(TypeAttribute.class);
try {
tokenStream.reset();
int position = 0;
while (tokenStream.incrementToken()) {
position += posIncrAtt.getPositionIncrement();
trackerAtt.setActPosition(position);
tokens.add(tokenStream.cloneAttributes());
}
tokenStream.end(); // TODO should we capture?
} catch (IOException ioe) {
throw new RuntimeException("Error occurred while iterating over tokenstream", ioe);
} finally {
IOUtils.closeWhileHandlingException(tokenStream);
}
return tokens;
}
// a static mapping of the reflected attribute keys to the names used in Solr 1.4
static Map<String,String> ATTRIBUTE_MAPPING = Collections.unmodifiableMap(new HashMap<String,String>() {{
put(OffsetAttribute.class.getName() + "#startOffset", "start");
put(OffsetAttribute.class.getName() + "#endOffset", "end");
put(TypeAttribute.class.getName() + "#type", "type");
put(TokenTrackingAttribute.class.getName() + "#position", "position");
put(TokenTrackingAttribute.class.getName() + "#positionHistory", "positionHistory");
}});
/**
* Converts the list of Tokens to a list of NamedLists representing the tokens.
*
* @param tokenList Tokens to convert
* @param context The analysis context
*
* @return List of NamedLists containing the relevant information taken from the tokens
*/
private List<NamedList> convertTokensToNamedLists(final List<AttributeSource> tokenList, AnalysisContext context) {
final List<NamedList> tokensNamedLists = new ArrayList<>();
final FieldType fieldType = context.getFieldType();
final AttributeSource[] tokens = tokenList.toArray(new AttributeSource[tokenList.size()]);
// sort the tokens by absolute position
ArrayUtil.timSort(tokens, new Comparator<AttributeSource>() {
@Override
public int compare(AttributeSource a, AttributeSource b) {
return arrayCompare(
a.getAttribute(TokenTrackingAttribute.class).getPositions(),
b.getAttribute(TokenTrackingAttribute.class).getPositions()
);
}
private int arrayCompare(int[] a, int[] b) {
int p = 0;
final int stop = Math.min(a.length, b.length);
while(p < stop) {
int diff = a[p] - b[p];
if (diff != 0) return diff;
p++;
}
// One is a prefix of the other, or, they are equal:
return a.length - b.length;
}
});
for (int i = 0; i < tokens.length; i++) {
AttributeSource token = tokens[i];
final NamedList<Object> tokenNamedList = new SimpleOrderedMap<>();
final BytesRef rawBytes;
if (token.hasAttribute(BytesTermAttribute.class)) {
final BytesTermAttribute bytesAtt = token.getAttribute(BytesTermAttribute.class);
rawBytes = bytesAtt.getBytesRef();
} else {
final TermToBytesRefAttribute termAtt = token.getAttribute(TermToBytesRefAttribute.class);
rawBytes = termAtt.getBytesRef();
}
final String text = fieldType.indexedToReadable(rawBytes, new CharsRefBuilder()).toString();
tokenNamedList.add("text", text);
if (token.hasAttribute(CharTermAttribute.class)) {
final String rawText = token.getAttribute(CharTermAttribute.class).toString();
if (!rawText.equals(text)) {
tokenNamedList.add("raw_text", rawText);
}
}
tokenNamedList.add("raw_bytes", rawBytes.toString());
if (context.getTermsToMatch().contains(rawBytes)) {
tokenNamedList.add("match", true);
}
token.reflectWith(new AttributeReflector() {
@Override
public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
// leave out position and bytes term
if (TermToBytesRefAttribute.class.isAssignableFrom(attClass))
return;
if (CharTermAttribute.class.isAssignableFrom(attClass))
return;
if (PositionIncrementAttribute.class.isAssignableFrom(attClass))
return;
String k = attClass.getName() + '#' + key;
// map keys for "standard attributes":
if (ATTRIBUTE_MAPPING.containsKey(k)) {
k = ATTRIBUTE_MAPPING.get(k);
}
if (value instanceof BytesRef) {
final BytesRef p = (BytesRef) value;
value = p.toString();
}
tokenNamedList.add(k, value);
}
});
tokensNamedLists.add(tokenNamedList);
}
return tokensNamedLists;
}
private String writeCharStream(NamedList<Object> out, Reader input ){
final int BUFFER_SIZE = 1024;
char[] buf = new char[BUFFER_SIZE];
int len = 0;
StringBuilder sb = new StringBuilder();
do {
try {
len = input.read( buf, 0, BUFFER_SIZE );
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
if( len > 0 )
sb.append(buf, 0, len);
} while( len == BUFFER_SIZE );
out.add( input.getClass().getName(), sb.toString());
return sb.toString();
}
// ================================================= Inner classes =================================================
/**
* TokenStream that iterates over a list of pre-existing Tokens
* @lucene.internal
*/
protected final static class ListBasedTokenStream extends TokenStream {
private final List<AttributeSource> tokens;
private Iterator<AttributeSource> tokenIterator;
/**
* Creates a new ListBasedTokenStream which uses the given tokens as its token source.
*
* @param attributeSource source of the attribute factory and attribute impls
* @param tokens Source of tokens to be used
*/
ListBasedTokenStream(AttributeSource attributeSource, List<AttributeSource> tokens) {
super(attributeSource.getAttributeFactory());
this.tokens = tokens;
// Make sure all the attributes of the source are here too
addAttributes(attributeSource);
}
@Override
public void reset() throws IOException {
super.reset();
tokenIterator = tokens.iterator();
}
@Override
public boolean incrementToken() {
if (tokenIterator.hasNext()) {
clearAttributes();
AttributeSource next = tokenIterator.next();
addAttributes(next); // just in case there were delayed attribute additions
next.copyTo(this);
return true;
} else {
return false;
}
}
protected void addAttributes(AttributeSource attributeSource) {
// note: ideally we wouldn't call addAttributeImpl which is marked internal. But nonetheless it's possible
// this method is used by some custom attributes, especially since Solr doesn't provide a way to customize the
// AttributeFactory which is the recommended way to choose which classes implement which attributes.
Iterator<AttributeImpl> atts = attributeSource.getAttributeImplsIterator();
while (atts.hasNext()) {
addAttributeImpl(atts.next()); // adds both impl & interfaces
}
}
}
/** This is an {@link Attribute} used to track the positions of tokens
* in the analysis chain.
* @lucene.internal This class is only public for usage by the {@link AttributeSource} API.
*/
public interface TokenTrackingAttribute extends Attribute {
void freezeStage();
void setActPosition(int pos);
int[] getPositions();
void reset(int[] basePositions, int position);
}
/** Implementation of {@link TokenTrackingAttribute}.
* @lucene.internal This class is only public for usage by the {@link AttributeSource} API.
*/
public static final class TokenTrackingAttributeImpl extends AttributeImpl implements TokenTrackingAttribute {
private int[] basePositions = new int[0];
private int position = 0;
private transient int[] cachedPositions = null;
@Override
public void freezeStage() {
this.basePositions = getPositions();
this.position = 0;
this.cachedPositions = null;
}
@Override
public void setActPosition(int pos) {
this.position = pos;
this.cachedPositions = null;
}
@Override
public int[] getPositions() {
if (cachedPositions == null) {
cachedPositions = ArrayUtils.add(basePositions, position);
}
return cachedPositions;
}
@Override
public void reset(int[] basePositions, int position) {
this.basePositions = basePositions;
this.position = position;
this.cachedPositions = null;
}
@Override
public void clear() {
// we do nothing here, as all attribute values are controlled externally by consumer
}
@Override
public void reflectWith(AttributeReflector reflector) {
reflector.reflect(TokenTrackingAttribute.class, "position", position);
// convert to Integer[] array, as only such one can be serialized by ResponseWriters
reflector.reflect(TokenTrackingAttribute.class, "positionHistory", ArrayUtils.toObject(getPositions()));
}
@Override
public void copyTo(AttributeImpl target) {
final TokenTrackingAttribute t = (TokenTrackingAttribute) target;
t.reset(basePositions, position);
}
}
/**
   * Serves as the context of an analysis process: it holds the name of the field being analyzed,
   * its field type, the analyzer to use, and the set of terms that should be marked as matches.
*/
protected static class AnalysisContext {
private final String fieldName;
private final FieldType fieldType;
private final Analyzer analyzer;
private final Set<BytesRef> termsToMatch;
/**
     * Constructs a new AnalysisContext with a given field type, analyzer and
     * termsToMatch. By default the field name in this context will be
     * {@code null}. During the analysis process, the produced tokens will
     * be compared to the terms in the {@code termsToMatch} set. When found,
* these tokens will be marked as a match.
*
* @param fieldType The type of the field the analysis is performed on.
* @param analyzer The analyzer to be used.
* @param termsToMatch Holds all the terms that should match during the
* analysis process.
*/
public AnalysisContext(FieldType fieldType, Analyzer analyzer, Set<BytesRef> termsToMatch) {
this(null, fieldType, analyzer, termsToMatch);
}
/**
* Constructs an AnalysisContext with a given field name, field type
* and analyzer. By default this context will hold no terms to match
*
* @param fieldName The name of the field the analysis is performed on
* (may be {@code null}).
* @param fieldType The type of the field the analysis is performed on.
* @param analyzer The analyzer to be used during the analysis process.
*
*/
public AnalysisContext(String fieldName, FieldType fieldType, Analyzer analyzer) {
this(fieldName, fieldType, analyzer, EMPTY_BYTES_SET);
}
/**
     * Constructs a new AnalysisContext with a given field type, analyzer and
     * termsToMatch. During the analysis process, the produced tokens will be
* compared to the terms in the {@code termsToMatch} set. When found,
* these tokens will be marked as a match.
*
* @param fieldName The name of the field the analysis is performed on
* (may be {@code null}).
* @param fieldType The type of the field the analysis is performed on.
* @param analyzer The analyzer to be used.
* @param termsToMatch Holds all the terms that should match during the
* analysis process.
*/
public AnalysisContext(String fieldName, FieldType fieldType, Analyzer analyzer, Set<BytesRef> termsToMatch) {
this.fieldName = fieldName;
this.fieldType = fieldType;
this.analyzer = analyzer;
this.termsToMatch = termsToMatch;
}
public String getFieldName() {
return fieldName;
}
public FieldType getFieldType() {
return fieldType;
}
public Analyzer getAnalyzer() {
return analyzer;
}
public Set<BytesRef> getTermsToMatch() {
return termsToMatch;
}
}
}
| 1 | 34,269 | I think best practice is to use `NamedList<Object>` as the return type, and `NamedList<?>` as the argument type in methods, but I can't find a reference for it right now. | apache-lucene-solr | java |
@@ -23,7 +23,9 @@ use Thelia\Core\Template\Element\PropelSearchLoopInterface;
use Thelia\Core\Template\Loop\Argument\Argument;
use Thelia\Core\Template\Loop\Argument\ArgumentCollection;
use Thelia\Coupon\Type\CouponInterface;
+use Thelia\Model\Base\CouponModule;
use Thelia\Model\Coupon as MCoupon;
+use Thelia\Model\CouponCountry;
use Thelia\Model\CouponQuery;
use Thelia\Model\Map\CouponTableMap;
use Thelia\Type\EnumListType; | 1 | <?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : [email protected] */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Thelia\Core\Template\Loop;
use Propel\Runtime\ActiveQuery\Criteria;
use Thelia\Condition\ConditionFactory;
use Thelia\Condition\Implementation\ConditionInterface;
use Thelia\Core\HttpFoundation\Request;
use Thelia\Core\Template\Element\BaseI18nLoop;
use Thelia\Core\Template\Element\LoopResult;
use Thelia\Core\Template\Element\LoopResultRow;
use Thelia\Core\Template\Element\PropelSearchLoopInterface;
use Thelia\Core\Template\Loop\Argument\Argument;
use Thelia\Core\Template\Loop\Argument\ArgumentCollection;
use Thelia\Coupon\Type\CouponInterface;
use Thelia\Model\Coupon as MCoupon;
use Thelia\Model\CouponQuery;
use Thelia\Model\Map\CouponTableMap;
use Thelia\Type\EnumListType;
use Thelia\Type\TypeCollection;
/**
* Coupon Loop
*
* @package Thelia\Core\Template\Loop
* @author Guillaume MOREL <[email protected]>
*
*/
class Coupon extends BaseI18nLoop implements PropelSearchLoopInterface
{
/**
* Define all args used in your loop
*
* @return ArgumentCollection
*/
protected function getArgDefinitions()
{
return new ArgumentCollection(
Argument::createIntListTypeArgument('id'),
Argument::createBooleanOrBothTypeArgument('is_enabled'),
new Argument(
'order',
new TypeCollection(
new EnumListType(array(
'id', 'id-reverse',
'code', 'code-reverse',
'title', 'title-reverse',
'enabled', 'enabled-reverse',
'expiration-date', 'expiration-date-reverse',
'days-left', 'days-left-reverse',
'usages-left', 'usages-left-reverse'
)
)
),
'code'
)
);
}
public function buildModelCriteria()
{
$search = CouponQuery::create();
/* manage translations */
$this->configureI18nProcessing($search, array('TITLE', 'DESCRIPTION', 'SHORT_DESCRIPTION'));
$id = $this->getId();
$isEnabled = $this->getIsEnabled();
if (null !== $id) {
$search->filterById($id, Criteria::IN);
}
if (isset($isEnabled)) {
$search->filterByIsEnabled($isEnabled ? true : false);
}
$search->addAsColumn('days_left', 'DATEDIFF('.CouponTableMap::EXPIRATION_DATE.', CURDATE()) - 1');
$orders = $this->getOrder();
foreach ($orders as $order) {
switch ($order) {
case 'id':
$search->orderById(Criteria::ASC);
break;
case 'id-reverse':
$search->orderById(Criteria::DESC);
break;
case 'code':
$search->orderByCode(Criteria::ASC);
break;
case 'code-reverse':
$search->orderByCode(Criteria::DESC);
break;
case 'title':
$search->addAscendingOrderByColumn('i18n_TITLE');
break;
case 'title-reverse':
$search->addDescendingOrderByColumn('i18n_TITLE');
break;
case 'enabled':
$search->orderByIsEnabled(Criteria::ASC);
break;
case 'enabled-reverse':
$search->orderByIsEnabled(Criteria::DESC);
break;
case 'expiration-date':
$search->orderByExpirationDate(Criteria::ASC);
break;
case 'expiration-date-reverse':
$search->orderByExpirationDate(Criteria::DESC);
break;
case 'usages-left':
$search->orderByMaxUsage(Criteria::ASC);
break;
case 'usages-left-reverse':
$search->orderByMaxUsage(Criteria::DESC);
break;
case 'days-left':
$search->addAscendingOrderByColumn('days_left');
break;
case 'days-left-reverse':
$search->addDescendingOrderByColumn('days_left');
break;
}
}
return $search;
}
public function parseResults(LoopResult $loopResult)
{
/** @var ConditionFactory $conditionFactory */
$conditionFactory = $this->container->get('thelia.condition.factory');
/** @var Request $request */
$request = $this->container->get('request');
/** @var Lang $lang */
$lang = $request->getSession()->getLang();
/** @var MCoupon $coupon */
foreach ($loopResult->getResultDataCollection() as $coupon) {
$loopResultRow = new LoopResultRow($coupon);
$conditions = $conditionFactory->unserializeConditionCollection(
$coupon->getSerializedConditions()
);
/** @var CouponInterface $couponManager */
$couponManager = $this->container->get($coupon->getType());
$couponManager->set(
$this->container->get('thelia.facade'),
$coupon->getCode(),
$coupon->getTitle(),
$coupon->getShortDescription(),
$coupon->getDescription(),
$coupon->getEffects(),
$coupon->getIsCumulative(),
$coupon->getIsRemovingPostage(),
$coupon->getIsAvailableOnSpecialOffers(),
$coupon->getIsEnabled(),
$coupon->getMaxUsage(),
$coupon->getExpirationDate()
);
$cleanedConditions = array();
/** @var ConditionInterface $condition */
foreach ($conditions as $condition) {
$temp = array(
'toolTip' => $condition->getToolTip(),
'summary' => $condition->getSummary()
);
$cleanedConditions[] = $temp;
}
$loopResultRow
->set("ID", $coupon->getId())
->set("IS_TRANSLATED", $coupon->getVirtualColumn('IS_TRANSLATED'))
->set("LOCALE", $this->locale)
->set("CODE", $coupon->getCode())
->set("TITLE", $coupon->getVirtualColumn('i18n_TITLE'))
->set("SHORT_DESCRIPTION", $coupon->getVirtualColumn('i18n_SHORT_DESCRIPTION'))
->set("DESCRIPTION", $coupon->getVirtualColumn('i18n_DESCRIPTION'))
->set("EXPIRATION_DATE", $coupon->getExpirationDate())
->set("USAGE_LEFT", $coupon->getMaxUsage())
->set("IS_CUMULATIVE", $coupon->getIsCumulative())
->set("IS_REMOVING_POSTAGE", $coupon->getIsRemovingPostage())
->set("IS_AVAILABLE_ON_SPECIAL_OFFERS", $coupon->getIsAvailableOnSpecialOffers())
->set("IS_ENABLED", $coupon->getIsEnabled())
->set("AMOUNT", $coupon->getAmount())
->set("APPLICATION_CONDITIONS", $cleanedConditions)
->set("TOOLTIP", $couponManager->getToolTip())
->set("DAY_LEFT_BEFORE_EXPIRATION", max(0, $coupon->getVirtualColumn('days_left')))
->set("SERVICE_ID", $couponManager->getServiceId());
$loopResult->addRow($loopResultRow);
}
return $loopResult;
}
}
| 1 | 10,091 | Be careful, the base model is imported here ! | thelia-thelia | php |
@@ -60,7 +60,7 @@ func RootCommand() (*cobra.Command, *Flags) {
rootCmd.PersistentFlags().IntVar(&cfg.HttpPort, "http.port", node.DefaultHTTPPort, "HTTP-RPC server listening port")
rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpCORSDomain, "http.corsdomain", []string{}, "Comma separated list of domains from which to accept cross origin requests (browser enforced)")
rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpVirtualHost, "http.vhosts", node.DefaultConfig.HTTPVirtualHosts, "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.")
- rootCmd.PersistentFlags().StringSliceVar(&cfg.API, "http.api", []string{"eth"}, "API's offered over the HTTP-RPC interface")
+ rootCmd.PersistentFlags().StringSliceVar(&cfg.API, "http.api", []string{"eth", "tg"}, "API's offered over the HTTP-RPC interface")
rootCmd.PersistentFlags().Uint64Var(&cfg.Gascap, "rpc.gascap", 0, "Sets a cap on gas that can be used in eth_call/estimateGas")
rootCmd.PersistentFlags().Uint64Var(&cfg.MaxTraces, "trace.maxtraces", 200, "Sets a limit on traces that can be returned in trace_filter")
rootCmd.PersistentFlags().StringVar(&cfg.TraceType, "trace.type", "parity", "Specify the type of tracing [geth|parity*] (experimental)") | 1 | package cli
import (
"context"
"fmt"
"net/http"
"time"
"github.com/ledgerwatch/turbo-geth/cmd/utils"
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/internal/debug"
"github.com/ledgerwatch/turbo-geth/log"
"github.com/ledgerwatch/turbo-geth/node"
"github.com/ledgerwatch/turbo-geth/rpc"
"github.com/spf13/cobra"
)
type Flags struct {
PrivateApiAddr string
Chaindata string
HttpListenAddress string
TLSCertfile string
TLSCACert string
TLSKeyFile string
HttpPort int
HttpCORSDomain []string
HttpVirtualHost []string
API []string
Gascap uint64
MaxTraces uint64
TraceType string
WebsocketEnabled bool
}
var rootCmd = &cobra.Command{
Use: "rpcdaemon",
Short: "rpcdaemon is JSON RPC server that connects to turbo-geth node for remote DB access",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := utils.SetupCobra(cmd); err != nil {
return err
}
return nil
},
PersistentPostRunE: func(cmd *cobra.Command, args []string) error {
utils.StopDebug()
return nil
},
}
func RootCommand() (*cobra.Command, *Flags) {
utils.CobraFlags(rootCmd, append(debug.Flags, utils.MetricFlags...))
cfg := &Flags{}
rootCmd.PersistentFlags().StringVar(&cfg.PrivateApiAddr, "private.api.addr", "127.0.0.1:9090", "private api network address, for example: 127.0.0.1:9090, empty string means not to start the listener. do not expose to public network. serves remote database interface")
rootCmd.PersistentFlags().StringVar(&cfg.Chaindata, "chaindata", "", "path to the database")
rootCmd.PersistentFlags().StringVar(&cfg.HttpListenAddress, "http.addr", node.DefaultHTTPHost, "HTTP-RPC server listening interface")
rootCmd.PersistentFlags().StringVar(&cfg.TLSCertfile, "tls.cert", "", "certificate for client side TLS handshake")
rootCmd.PersistentFlags().StringVar(&cfg.TLSKeyFile, "tls.key", "", "key file for client side TLS handshake")
rootCmd.PersistentFlags().StringVar(&cfg.TLSCACert, "tls.cacert", "", "CA certificate for client side TLS handshake")
rootCmd.PersistentFlags().IntVar(&cfg.HttpPort, "http.port", node.DefaultHTTPPort, "HTTP-RPC server listening port")
rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpCORSDomain, "http.corsdomain", []string{}, "Comma separated list of domains from which to accept cross origin requests (browser enforced)")
rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpVirtualHost, "http.vhosts", node.DefaultConfig.HTTPVirtualHosts, "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.")
rootCmd.PersistentFlags().StringSliceVar(&cfg.API, "http.api", []string{"eth"}, "API's offered over the HTTP-RPC interface")
rootCmd.PersistentFlags().Uint64Var(&cfg.Gascap, "rpc.gascap", 0, "Sets a cap on gas that can be used in eth_call/estimateGas")
rootCmd.PersistentFlags().Uint64Var(&cfg.MaxTraces, "trace.maxtraces", 200, "Sets a limit on traces that can be returned in trace_filter")
rootCmd.PersistentFlags().StringVar(&cfg.TraceType, "trace.type", "parity", "Specify the type of tracing [geth|parity*] (experimental)")
rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketEnabled, "ws", false, "Enable Websockets")
return rootCmd, cfg
}
func OpenDB(cfg Flags) (ethdb.KV, ethdb.Backend, error) {
var db ethdb.KV
var txPool ethdb.Backend
var err error
// Do not change the order of these checks. Chaindata needs to be checked first, because PrivateApiAddr has a default value which is not ""
// If PrivateApiAddr is checked first, the Chaindata option will never work
if cfg.Chaindata != "" {
if database, errOpen := ethdb.Open(cfg.Chaindata); errOpen == nil {
db = database.KV()
} else {
err = errOpen
}
} else if cfg.PrivateApiAddr != "" {
db, txPool, err = ethdb.NewRemote2().Path(cfg.PrivateApiAddr).Open(cfg.TLSCertfile, cfg.TLSKeyFile, cfg.TLSCACert)
if err != nil {
return nil, nil, fmt.Errorf("could not connect to remoteDb: %w", err)
}
} else {
return nil, nil, fmt.Errorf("either remote db or lmdb must be specified")
}
if err != nil {
return nil, nil, fmt.Errorf("could not connect to remoteDb: %w", err)
}
return db, txPool, err
}
func StartRpcServer(ctx context.Context, cfg Flags, rpcAPI []rpc.API) error {
// register apis and create handler stack
httpEndpoint := fmt.Sprintf("%s:%d", cfg.HttpListenAddress, cfg.HttpPort)
srv := rpc.NewServer()
if err := node.RegisterApisFromWhitelist(rpcAPI, cfg.API, srv, false); err != nil {
return fmt.Errorf("could not start register RPC apis: %w", err)
}
var err error
httpHandler := node.NewHTTPHandlerStack(srv, cfg.HttpCORSDomain, cfg.HttpVirtualHost)
var wsHandler http.Handler
if cfg.WebsocketEnabled {
wsHandler = srv.WebsocketHandler([]string{"*"})
}
var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if cfg.WebsocketEnabled && r.Method == "GET" {
wsHandler.ServeHTTP(w, r)
}
httpHandler.ServeHTTP(w, r)
})
listener, _, err := node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler)
if err != nil {
return fmt.Errorf("could not start RPC api: %w", err)
}
if cfg.TraceType != "parity" {
log.Info("Tracing output type: ", cfg.TraceType)
}
log.Info("HTTP endpoint opened", "url", httpEndpoint, "ws", cfg.WebsocketEnabled)
defer func() {
srv.Stop()
shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_ = listener.Shutdown(shutdownCtx)
log.Info("HTTP endpoint closed", "url", httpEndpoint)
}()
<-ctx.Done()
log.Info("Exiting...")
return nil
}
| 1 | 21,860 | Do we want to make this part of the default? Probably not. In fact, the default should probably be eth, web3 and net (which are the standard namespaces on other nodes). | ledgerwatch-erigon | go |
@@ -1,4 +1,8 @@
class Clump < ActiveRecord::Base
belongs_to :code_set
belongs_to :slave
+
+ def path
+ slave.path_from_code_set_id(code_set_id)
+ end
end | 1 | class Clump < ActiveRecord::Base
belongs_to :code_set
belongs_to :slave
end
| 1 | 8,314 | What is the plan when we deploy the Crawler VM project and eliminate the Clump model? | blackducksoftware-ohloh-ui | rb |
@@ -28,10 +28,6 @@ type MultiClusterConfig struct {
metav1.TypeMeta `json:",inline"`
	// ControllerManagerConfigurationSpec returns the configurations for controllers
config.ControllerManagerConfigurationSpec `json:",inline"`
- // Leader is a role of ClusterSet member cluster
- Leader bool `json:"leader,omitempty"`
- // Member is a role of ClusterSet member cluster
- Member bool `json:"member,omitempty"`
}
func init() { | 1 | /*
Copyright 2021 Antrea Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
config "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
)
//+kubebuilder:object:root=true
// MultiClusterConfig is the Schema for the multiclusterconfigs API
type MultiClusterConfig struct {
metav1.TypeMeta `json:",inline"`
// ControllerManagerConfigurationSpec returns the configurations for controllers
config.ControllerManagerConfigurationSpec `json:",inline"`
// Leader is a role of ClusterSet member cluster
Leader bool `json:"leader,omitempty"`
// Member is a role of ClusterSet member cluster
Member bool `json:"member,omitempty"`
}
func init() {
SchemeBuilder.Register(&MultiClusterConfig{})
}
| 1 | 49,496 | these two are customized config fields, if we don't need them any more, we probably can use default ControllerManagerConfiguration | antrea-io-antrea | go |
@@ -39,7 +39,12 @@ var (
defaultExecutablePathProvider executablePathProvider = os.Executable
defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args }
defaultOwnerProvider ownerProvider = user.Current
- defaultRuntimeNameProvider runtimeNameProvider = func() string { return runtime.Compiler }
+ defaultRuntimeNameProvider runtimeNameProvider = func() string {
+ if runtime.Compiler == "gc" {
+ return "go"
+ }
+ return runtime.Compiler
+ }
defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS }
defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resource // import "go.opentelemetry.io/otel/sdk/resource"
import (
"context"
"fmt"
"os"
"os/user"
"path/filepath"
"runtime"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
)
type pidProvider func() int
type executablePathProvider func() (string, error)
type commandArgsProvider func() []string
type ownerProvider func() (*user.User, error)
type runtimeNameProvider func() string
type runtimeVersionProvider func() string
type runtimeOSProvider func() string
type runtimeArchProvider func() string
var (
defaultPidProvider pidProvider = os.Getpid
defaultExecutablePathProvider executablePathProvider = os.Executable
defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args }
defaultOwnerProvider ownerProvider = user.Current
defaultRuntimeNameProvider runtimeNameProvider = func() string { return runtime.Compiler }
defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS }
defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH }
)
var (
pid = defaultPidProvider
executablePath = defaultExecutablePathProvider
commandArgs = defaultCommandArgsProvider
owner = defaultOwnerProvider
runtimeName = defaultRuntimeNameProvider
runtimeVersion = defaultRuntimeVersionProvider
runtimeOS = defaultRuntimeOSProvider
runtimeArch = defaultRuntimeArchProvider
)
func setDefaultOSProviders() {
setOSProviders(
defaultPidProvider,
defaultExecutablePathProvider,
defaultCommandArgsProvider,
)
}
func setOSProviders(
pidProvider pidProvider,
executablePathProvider executablePathProvider,
commandArgsProvider commandArgsProvider,
) {
pid = pidProvider
executablePath = executablePathProvider
commandArgs = commandArgsProvider
}
func setDefaultRuntimeProviders() {
setRuntimeProviders(
defaultRuntimeNameProvider,
defaultRuntimeVersionProvider,
defaultRuntimeOSProvider,
defaultRuntimeArchProvider,
)
}
func setRuntimeProviders(
runtimeNameProvider runtimeNameProvider,
runtimeVersionProvider runtimeVersionProvider,
runtimeOSProvider runtimeOSProvider,
runtimeArchProvider runtimeArchProvider,
) {
runtimeName = runtimeNameProvider
runtimeVersion = runtimeVersionProvider
runtimeOS = runtimeOSProvider
runtimeArch = runtimeArchProvider
}
func setDefaultUserProviders() {
setUserProviders(defaultOwnerProvider)
}
func setUserProviders(ownerProvider ownerProvider) {
owner = ownerProvider
}
type processPIDDetector struct{}
type processExecutableNameDetector struct{}
type processExecutablePathDetector struct{}
type processCommandArgsDetector struct{}
type processOwnerDetector struct{}
type processRuntimeNameDetector struct{}
type processRuntimeVersionDetector struct{}
type processRuntimeDescriptionDetector struct{}
// Detect returns a *Resource that describes the process identifier (PID) of the
// executing process.
func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPIDKey.Int(pid())), nil
}
// Detect returns a *Resource that describes the name of the process executable.
func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
executableName := filepath.Base(commandArgs()[0])
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableNameKey.String(executableName)), nil
}
// Detect returns a *Resource that describes the full path of the process executable.
func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) {
executablePath, err := executablePath()
if err != nil {
return nil, err
}
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePathKey.String(executablePath)), nil
}
// Detect returns a *Resource that describes all the command arguments as received
// by the process.
func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgsKey.StringSlice(commandArgs())), nil
}
// Detect returns a *Resource that describes the username of the user that owns the
// process.
func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
owner, err := owner()
if err != nil {
return nil, err
}
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwnerKey.String(owner.Username)), nil
}
// Detect returns a *Resource that describes the name of the compiler used to compile
// this process image.
func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeNameKey.String(runtimeName())), nil
}
// Detect returns a *Resource that describes the version of the runtime of this process.
func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersionKey.String(runtimeVersion())), nil
}
// Detect returns a *Resource that describes the runtime of this process.
func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
runtimeDescription := fmt.Sprintf(
"go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
return NewWithAttributes(
semconv.SchemaURL,
semconv.ProcessRuntimeDescriptionKey.String(runtimeDescription),
), nil
}
| 1 | 17,430 | I don't think we should be pulling the runtime name from the Compiler. There are two compiler frontends, but both of them compile the same source to generate the "go" runtime. I think until there is a request from an alternative implementation (the embedded world **might** have a different runtime), it's fine to hardcode this as "go" | open-telemetry-opentelemetry-go | go |
@@ -932,6 +932,10 @@ Model.find = function find (conditions, fields, options, callback) {
options = null;
}
+ if (this.schema.discriminatorMapping && fields) {
+ fields = fields + ' ' + this.schema.options.discriminatorKey;
+ }
+
// get the raw mongodb collection object
var mq = new Query({}, options, this, this.collection);
mq.select(fields); | 1 | /*!
* Module dependencies.
*/
var Document = require('./document')
, MongooseArray = require('./types/array')
, MongooseBuffer = require('./types/buffer')
, MongooseError = require('./error')
, VersionError = MongooseError.VersionError
, DivergentArrayError = MongooseError.DivergentArrayError
, Query = require('./query')
, Aggregate = require('./aggregate')
, Schema = require('./schema')
, Types = require('./schema/index')
, utils = require('./utils')
, hasOwnProperty = utils.object.hasOwnProperty
, isMongooseObject = utils.isMongooseObject
, EventEmitter = require('events').EventEmitter
, merge = utils.merge
, Promise = require('./promise')
, assert = require('assert')
, util = require('util')
, tick = utils.tick
, Query = require('./query.js')
var VERSION_WHERE = 1
, VERSION_INC = 2
, VERSION_ALL = VERSION_WHERE | VERSION_INC;
/**
* Model constructor
*
* Provides the interface to MongoDB collections as well as creates document instances.
*
* @param {Object} doc values with which to create the document
* @inherits Document
* @event `error`: If listening to this event, it is emitted when a document was saved without passing a callback and an `error` occurred. If not listening, the event bubbles to the connection used to create this Model.
* @event `index`: Emitted after `Model#ensureIndexes` completes. If an error occurred it is passed with the event.
* @api public
*/
function Model (doc, fields, skipId) {
Document.call(this, doc, fields, skipId);
};
/*!
* Inherits from Document.
*
* All Model.prototype features are available on
* top level (non-sub) documents.
*/
Model.prototype.__proto__ = Document.prototype;
/**
* Connection the model uses.
*
* @api public
* @property db
*/
Model.prototype.db;
/**
* Collection the model uses.
*
* @api public
* @property collection
*/
Model.prototype.collection;
/**
* The name of the model
*
* @api public
* @property modelName
*/
Model.prototype.modelName;
/*!
* Handles doc.save() callbacks
*/
function handleSave (promise, self) {
return tick(function handleSave (err, result) {
if (err) {
// If the initial insert fails provide a second chance.
// (If we did this all the time we would break updates)
if (self.$__.inserting) {
self.isNew = true;
self.emit('isNew', true);
}
promise.error(err);
promise = self = null;
return;
}
self.$__storeShard();
var numAffected;
if (result) {
// when inserting, the array of created docs is returned
numAffected = result.length
? result.length
: result;
} else {
numAffected = 0;
}
// was this an update that required a version bump?
if (self.$__.version && !self.$__.inserting) {
var doIncrement = VERSION_INC === (VERSION_INC & self.$__.version);
self.$__.version = undefined;
// increment version if was successful
if (numAffected > 0) {
if (doIncrement) {
var key = self.schema.options.versionKey;
var version = self.getValue(key) | 0;
self.setValue(key, version + 1);
}
} else {
// the update failed. pass an error back
promise.error(new VersionError);
promise = self = null;
return;
}
}
self.emit('save', self, numAffected);
promise.complete(self, numAffected);
promise = self = null;
});
}
/**
* Saves this document.
*
* ####Example:
*
* product.sold = Date.now();
* product.save(function (err, product, numberAffected) {
* if (err) ..
* })
*
* The callback will receive three parameters, `err` if an error occurred, `product` which is the saved `product`, and `numberAffected` which will be 1 when the document was found and updated in the database, otherwise 0.
*
* The `fn` callback is optional. If no `fn` is passed and validation fails, the validation error will be emitted on the connection used to create this model.
*
* var db = mongoose.createConnection(..);
* var schema = new Schema(..);
* var Product = db.model('Product', schema);
*
* db.on('error', handleError);
*
* However, if you desire more local error handling you can add an `error` listener to the model and handle errors there instead.
*
* Product.on('error', handleError);
*
* @param {Function} [fn] optional callback
* @api public
* @see middleware http://mongoosejs.com/docs/middleware.html
*/
Model.prototype.save = function save (fn) {
var promise = new Promise(fn)
, complete = handleSave(promise, this)
, options = {}
if (this.schema.options.safe) {
options.safe = this.schema.options.safe;
}
if (this.isNew) {
// send entire doc
var obj = this.toObject({ depopulate: 1 });
if (!utils.object.hasOwnProperty(obj || {}, '_id')) {
// documents must have an _id else mongoose won't know
// what to update later if more changes are made. the user
// wouldn't know what _id was generated by mongodb either
// nor would the ObjectId generated my mongodb necessarily
// match the schema definition.
return complete(new Error('document must have an _id before saving'));
}
this.$__version(true, obj);
this.collection.insert(obj, options, complete);
this.$__reset();
this.isNew = false;
this.emit('isNew', false);
// Make it possible to retry the insert
this.$__.inserting = true;
} else {
// Make sure we don't treat it as a new object on error,
// since it already exists
this.$__.inserting = false;
var delta = this.$__delta();
if (delta) {
if (delta instanceof Error) return complete(delta);
var where = this.$__where(delta[0]);
this.$__reset();
this.collection.update(where, delta[1], options, complete);
} else {
this.$__reset();
complete(null);
}
this.emit('isNew', false);
}
};
/*!
* Apply the operation to the delta (update) clause as
* well as track versioning for our where clause.
*
* @param {Document} self
* @param {Object} where
* @param {Object} delta
* @param {Object} data
* @param {Mixed} val
* @param {String} [operation]
*/
function operand (self, where, delta, data, val, op) {
// delta
op || (op = '$set');
if (!delta[op]) delta[op] = {};
delta[op][data.path] = val;
// disabled versioning?
if (false === self.schema.options.versionKey) return;
// already marked for versioning?
if (VERSION_ALL === (VERSION_ALL & self.$__.version)) return;
switch (op) {
case '$set':
case '$unset':
case '$pop':
case '$pull':
case '$pullAll':
case '$push':
case '$pushAll':
case '$addToSet':
break;
default:
// nothing to do
return;
}
// ensure updates sent with positional notation are
// editing the correct array element.
// only increment the version if an array position changes.
// modifying elements of an array is ok if position does not change.
if ('$push' == op || '$pushAll' == op || '$addToSet' == op) {
self.$__.version = VERSION_INC;
}
else if (/^\$p/.test(op)) {
// potentially changing array positions
self.increment();
}
else if (Array.isArray(val)) {
// $set an array
self.increment();
}
// now handling $set, $unset
else if (/\.\d+\.|\.\d+$/.test(data.path)) {
// subpath of array
self.$__.version = VERSION_WHERE;
}
}
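/*!
* Illustrative sketch (not part of the original source): how common array
* operations map to the version flags chosen above, assuming a document with
* an array path `comments` and the default `__v` version key.
*
*     doc.comments.push({ title: 'a' });  // $push/$pushAll  -> VERSION_INC   (only __v is $inc'd)
*     doc.comments.pull(subdoc);          // $pull/$pullAll  -> increment()   (VERSION_ALL)
*     doc.set('comments.0.title', 'b');   // $set on 'comments.0.title' -> VERSION_WHERE
*                                         //                    (__v added only to the where clause)
*/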
/*!
* Compiles an update and where clause for a `val` with _atomics.
*
* @param {Document} self
* @param {Object} where
* @param {Object} delta
* @param {Object} data
* @param {Array} value
*/
function handleAtomics (self, where, delta, data, value) {
if (delta.$set && delta.$set[data.path]) {
// $set has precedence over other atomics
return;
}
if ('function' == typeof value.$__getAtomics) {
value.$__getAtomics().forEach(function (atomic) {
var op = atomic[0];
var val = atomic[1];
operand(self, where, delta, data, val, op);
})
return;
}
// legacy support for plugins
var atomics = value._atomics
, ops = Object.keys(atomics)
, i = ops.length
, val
, op;
if (0 === i) {
// $set
if (isMongooseObject(value)) {
value = value.toObject({ depopulate: 1 });
} else if (value.valueOf) {
value = value.valueOf();
}
return operand(self, where, delta, data, value);
}
while (i--) {
op = ops[i];
val = atomics[op];
if (isMongooseObject(val)) {
val = val.toObject({ depopulate: 1 })
} else if (Array.isArray(val)) {
val = val.map(function (mem) {
return isMongooseObject(mem)
? mem.toObject({ depopulate: 1 })
: mem;
})
} else if (val.valueOf) {
val = val.valueOf()
}
if ('$addToSet' === op)
val = { $each: val };
operand(self, where, delta, data, val, op);
}
}
/**
* Produces a special query document of the modified properties used in updates.
*
* @api private
* @method $__delta
* @memberOf Model
*/
Model.prototype.$__delta = function () {
var dirty = this.$__dirty();
if (!dirty.length && VERSION_ALL != this.$__.version) return;
var where = {}
, delta = {}
, len = dirty.length
, divergent = []
, d = 0
, val
, obj
for (; d < len; ++d) {
var data = dirty[d]
var value = data.value
var schema = data.schema
var match = checkDivergentArray(this, data.path, value);
if (match) {
divergent.push(match);
continue;
}
if (divergent.length) continue;
if (undefined === value) {
operand(this, where, delta, data, 1, '$unset');
} else if (null === value) {
operand(this, where, delta, data, null);
} else if (value._path && value._atomics) {
// arrays and other custom types (support plugins etc)
handleAtomics(this, where, delta, data, value);
} else if (value._path && Buffer.isBuffer(value)) {
// MongooseBuffer
value = value.toObject();
operand(this, where, delta, data, value);
} else {
value = utils.clone(value, { depopulate: 1 });
operand(this, where, delta, data, value);
}
}
if (divergent.length) {
return new DivergentArrayError(divergent);
}
if (this.$__.version) {
this.$__version(where, delta);
}
return [where, delta];
}
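/*!
* Hedged sketch (not in the original source) of the shape of the [where, delta]
* pair returned here, for a doc whose `name` was set and whose `tags` array was
* pushed to (default `__v` version key; the exact array operator depends on the
* atomics registered by the MongooseArray):
*
*     [ {}                                  // _id / shard keys are added later by $__where()
*     , { $set:  { name: 'updated' }
*       , $push: { tags: 'x' }              // or $pushAll: { tags: ['x'] }
*       , $inc:  { __v: 1 } } ]
*/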
/*!
* Determine if array was populated with some form of filter and is now
* being updated in a manner which could overwrite data unintentionally.
*
* @see https://github.com/LearnBoost/mongoose/issues/1334
* @param {Document} doc
* @param {String} path
* @return {String|undefined}
*/
function checkDivergentArray (doc, path, array) {
// see if we populated this path
var pop = doc.populated(path, true);
if (!pop && doc.$__.selected) {
// If any array was selected using an $elemMatch projection, we deny the update.
// NOTE: MongoDB only supports projected $elemMatch on top level array.
var top = path.split('.')[0];
if (doc.$__.selected[top] && doc.$__.selected[top].$elemMatch) {
return top;
}
}
if (!(pop && array instanceof MongooseArray)) return;
// If the array was populated using options that prevented all
// documents from being returned (match, skip, limit) or they
// deselected the _id field, $pop and $set of the array are
// not safe operations. If _id was deselected, we do not know
// how to remove elements. $pop will pop off the _id from the end
// of the array in the db which is not guaranteed to be the
// same as the last element we have here. $set of the entire array
// would be similarly destructive as we never received all
// elements of the array and potentially would overwrite data.
var check = pop.options.match ||
pop.options.options && hasOwnProperty(pop.options.options, 'limit') || // 0 is not permitted
pop.options.options && pop.options.options.skip || // 0 is permitted
pop.options.select && // deselected _id?
(0 === pop.options.select._id ||
/\s?-_id\s?/.test(pop.options.select))
if (check) {
var atomics = array._atomics;
if (0 === Object.keys(atomics).length || atomics.$set || atomics.$pop) {
return path;
}
}
}
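/*!
* Hedged usage sketch (not in the original source) of the case this guard
* rejects, assuming a BlogPost model whose `comments` path holds ObjectId refs:
*
*     BlogPost
*       .findById(id)
*       .populate({ path: 'comments', options: { limit: 10 } })
*       .exec(function (err, post) {
*         post.comments.$pop();   // $pop (or a whole-array $set) on a partially populated array
*         post.save(function (err) {
*           // err is a DivergentArrayError: unseen elements could be clobbered
*         });
*       })
*/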
/**
* Appends versioning to the where and update clauses.
*
* @api private
* @method $__version
* @memberOf Model
*/
Model.prototype.$__version = function (where, delta) {
var key = this.schema.options.versionKey;
if (true === where) {
// this is an insert
if (key) this.setValue(key, delta[key] = 0);
return;
}
// updates
// only apply versioning if our versionKey was selected. else
// there is no way to select the correct version. we could fail
// fast here and force them to include the versionKey but
// thats a bit intrusive. can we do this automatically?
if (!this.isSelected(key)) {
return;
}
// $push $addToSet don't need the where clause set
if (VERSION_WHERE === (VERSION_WHERE & this.$__.version)) {
where[key] = this.getValue(key);
}
if (VERSION_INC === (VERSION_INC & this.$__.version)) {
delta.$inc || (delta.$inc = {});
delta.$inc[key] = 1;
}
}
/**
* Signal that we desire an increment of this documents version.
*
* ####Example:
*
* Model.findById(id, function (err, doc) {
* doc.increment();
* doc.save(function (err) { .. })
* })
*
* @see versionKeys http://mongoosejs.com/docs/guide.html#versionKey
* @api public
*/
Model.prototype.increment = function increment () {
this.$__.version = VERSION_ALL;
return this;
}
/**
* Returns a query object which applies shardkeys if they exist.
*
* @api private
* @method $__where
* @memberOf Model
*/
Model.prototype.$__where = function _where (where) {
where || (where = {});
var paths
, len
if (this.$__.shardval) {
paths = Object.keys(this.$__.shardval)
len = paths.length
for (var i = 0; i < len; ++i) {
where[paths[i]] = this.$__.shardval[paths[i]];
}
}
where._id = this._doc._id;
return where;
}
/**
* Removes this document from the db.
*
* ####Example:
*
* product.remove(function (err, product) {
* if (err) return handleError(err);
* Product.findById(product._id, function (err, product) {
* console.log(product) // null
* })
* })
*
* @param {Function} [fn] optional callback
* @api public
*/
Model.prototype.remove = function remove (fn) {
if (this.$__.removing) {
this.$__.removing.addBack(fn);
return this;
}
var promise = this.$__.removing = new Promise(fn)
, where = this.$__where()
, self = this
, options = {}
if (this.schema.options.safe) {
options.safe = this.schema.options.safe;
}
this.collection.remove(where, options, tick(function (err) {
if (err) {
promise.error(err);
promise = self = self.$__.removing = where = options = null;
return;
}
self.emit('remove', self);
promise.complete(self);
promise = self = where = options = null;
}));
return this;
};
/**
* Returns another Model instance.
*
* ####Example:
*
* var doc = new Tank;
* doc.model('User').findById(id, callback);
*
* @param {String} name model name
* @api public
*/
Model.prototype.model = function model (name) {
return this.db.model(name);
};
/**
* Adds a discriminator type.
*
* ####Example:
*
* function BaseSchema() {
* Schema.apply(this, arguments);
*
* this.add({
* name: String,
* createdAt: Date
* });
* }
* util.inherits(BaseSchema, Schema);
*
* var PersonSchema = new BaseSchema();
* var BossSchema = new BaseSchema({ department: String });
*
* var Person = mongoose.model('Person', PersonSchema);
* var Boss = Person.discriminator('Boss', BossSchema);
*
* @param {String} name discriminator model name
* @param {Schema} schema discriminator model schema
* @api public
*/
Model.discriminator = function discriminator (name, schema) {
if (!(schema instanceof Schema)) {
throw new Error("You must pass a valid discriminator Schema");
}
if (this.schema.discriminatorMapping && !this.schema.discriminatorMapping.isRoot) {
throw new Error("Discriminator \"" + name + "\" can only be a discriminator of the root model");
}
var key = this.schema.options.discriminatorKey;
if (schema.path(key)) {
throw new Error("Discriminator \"" + name + "\" cannot have field with name \"" + key + "\"");
}
// merges base schema into new discriminator schema and sets new type field.
(function mergeSchemas(schema, baseSchema) {
utils.merge(schema, baseSchema);
var obj = {};
obj[key] = { type: String, default: name };
schema.add(obj);
schema.discriminatorMapping = { key: key, value: name, isRoot: false };
if (baseSchema.options.collection) {
schema.options.collection = baseSchema.options.collection;
}
// throws error if options are invalid
(function validateOptions(a, b) {
a = utils.clone(a);
b = utils.clone(b);
delete a.toJSON;
delete a.toObject;
delete b.toJSON;
delete b.toObject;
if (!utils.deepEqual(a, b)) {
throw new Error("Discriminator options are not customizable (except toJSON & toObject)");
}
})(schema.options, baseSchema.options);
var toJSON = schema.options.toJSON
, toObject = schema.options.toObject;
schema.options = utils.clone(baseSchema.options);
if (toJSON) schema.options.toJSON = toJSON;
if (toObject) schema.options.toObject = toObject;
schema.callQueue = baseSchema.callQueue.concat(schema.callQueue);
schema._requiredpaths = undefined; // reset just in case Schema#requiredPaths() was called on either schema
})(schema, this.schema);
if (!this.discriminators) {
this.discriminators = {};
}
if (!this.schema.discriminatorMapping) {
this.schema.discriminatorMapping = { key: key, value: null, isRoot: true };
}
if (this.discriminators[name]) {
throw new Error("Discriminator with name \"" + name + "\" already exists");
}
this.discriminators[name] = this.db.model(name, schema, this.collection.name);
this.discriminators[name].prototype.__proto__ = this.prototype;
return this.discriminators[name];
};
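/*!
* Hedged usage sketch (not in the original source), continuing the Person/Boss
* example from the docblock above. Both models share one collection, and each
* Boss document carries the discriminator key (`__t` by default):
*
*     Boss.create({ name: 'Jane', department: 'IT' }, function (err, boss) {
*       // stored as { name: 'Jane', department: 'IT', __t: 'Boss', _id: ..., __v: 0 }
*       Person.find(function (err, people) {
*         // returns Person and Boss documents from the shared collection;
*         // docs whose __t is 'Boss' are hydrated as Boss instances
*       });
*     });
*/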
// Model (class) features
/*!
* Give the constructor the ability to emit events.
*/
for (var i in EventEmitter.prototype)
Model[i] = EventEmitter.prototype[i];
/**
* Called when the model compiles.
*
* @api private
*/
Model.init = function init () {
if (this.schema.options.autoIndex) {
this.ensureIndexes();
}
this.schema.emit('init', this);
};
/**
* Sends `ensureIndex` commands to mongo for each index declared in the schema.
*
* ####Example:
*
* Event.ensureIndexes(function (err) {
* if (err) return handleError(err);
* });
*
* After completion, an `index` event is emitted on this `Model` passing an error if one occurred.
*
* ####Example:
*
* var eventSchema = new Schema({ thing: { type: 'string', unique: true }})
* var Event = mongoose.model('Event', eventSchema);
*
* Event.on('index', function (err) {
* if (err) console.error(err); // error occurred during index creation
* })
*
* _NOTE: It is not recommended that you run this in production. Index creation may impact database performance depending on your load. Use with caution._
*
* The `ensureIndex` commands are not sent in parallel. This is to avoid the `MongoError: cannot add index with a background operation in progress` error. See [this ticket](https://github.com/LearnBoost/mongoose/issues/1365) for more information.
*
* @param {Function} [cb] optional callback
* @return {Promise}
* @api public
*/
Model.ensureIndexes = function ensureIndexes (cb) {
var promise = new Promise(cb);
var indexes = this.schema.indexes();
if (!indexes.length) {
process.nextTick(promise.fulfill.bind(promise));
return promise;
}
// Indexes are created one-by-one to support how MongoDB < 2.4 deals
// with background indexes.
var self = this
, safe = self.schema.options.safe
function done (err) {
self.emit('index', err);
promise.resolve(err);
}
function create () {
var index = indexes.shift();
if (!index) return done();
var options = index[1];
options.safe = safe;
self.collection.ensureIndex(index[0], options, tick(function (err) {
if (err) return done(err);
create();
}));
}
create();
return promise;
}
/**
* Schema the model uses.
*
* @property schema
* @receiver Model
* @api public
*/
Model.schema;
/*!
* Connection instance the model uses.
*
* @property db
* @receiver Model
* @api public
*/
Model.db;
/*!
* Collection the model uses.
*
* @property collection
* @receiver Model
* @api public
*/
Model.collection;
/**
* Base Mongoose instance the model uses.
*
* @property base
* @receiver Model
* @api public
*/
Model.base;
/**
* Registered discriminators for this model.
*
* @property discriminators
* @receiver Model
* @api public
*/
Model.discriminators;
/**
* Removes documents from the collection.
*
* ####Example:
*
* Comment.remove({ title: 'baby born from alien father' }, function (err) {
*
* });
*
* ####Note:
*
* To remove documents without waiting for a response from MongoDB, do not pass a `callback`, then call `exec` on the returned [Query](#query-js):
*
* var query = Comment.remove({ _id: id });
* query.exec();
*
* ####Note:
*
* This method sends a remove command directly to MongoDB, no Mongoose documents are involved. Because no Mongoose documents are involved, _no middleware (hooks) are executed_.
*
* @param {Object} conditions
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.remove = function remove (conditions, callback) {
if ('function' === typeof conditions) {
callback = conditions;
conditions = {};
}
// get the mongodb collection object
var mq = new Query(conditions, {}, this, this.collection);
return mq.remove(callback);
};
/**
* Finds documents
*
* The `conditions` are cast to their respective SchemaTypes before the command is sent.
*
* ####Examples:
*
* // named john and at least 18
* MyModel.find({ name: 'john', age: { $gte: 18 }});
*
* // executes immediately, passing results to callback
* MyModel.find({ name: 'john', age: { $gte: 18 }}, function (err, docs) {});
*
* // name LIKE john and only selecting the "name" and "friends" fields, executing immediately
* MyModel.find({ name: /john/i }, 'name friends', function (err, docs) { })
*
* // passing options
* MyModel.find({ name: /john/i }, null, { skip: 10 })
*
* // passing options and executing immediately
* MyModel.find({ name: /john/i }, null, { skip: 10 }, function (err, docs) {});
*
* // executing a query explicitly
* var query = MyModel.find({ name: /john/i }, null, { skip: 10 })
* query.exec(function (err, docs) {});
*
* // using the promise returned from executing a query
* var query = MyModel.find({ name: /john/i }, null, { skip: 10 });
* var promise = query.exec();
* promise.addBack(function (err, docs) {});
*
* @param {Object} conditions
* @param {Object} [fields] optional fields to select
* @param {Object} [options] optional
* @param {Function} [callback]
* @return {Query}
* @see field selection #query_Query-select
* @see promise #promise-js
* @api public
*/
Model.find = function find (conditions, fields, options, callback) {
if ('function' == typeof conditions) {
callback = conditions;
conditions = {};
fields = null;
options = null;
} else if ('function' == typeof fields) {
callback = fields;
fields = null;
options = null;
} else if ('function' == typeof options) {
callback = options;
options = null;
}
// get the raw mongodb collection object
var mq = new Query({}, options, this, this.collection);
mq.select(fields);
return mq.find(conditions, callback);
};
/**
* Finds a single document by id.
*
* The `id` is cast based on the Schema before sending the command.
*
* ####Example:
*
* // find adventure by id and execute immediately
* Adventure.findById(id, function (err, adventure) {});
*
* // same as above
* Adventure.findById(id).exec(callback);
*
* // select only the adventures name and length
* Adventure.findById(id, 'name length', function (err, adventure) {});
*
* // same as above
* Adventure.findById(id, 'name length').exec(callback);
*
* // include all properties except for `length`
* Adventure.findById(id, '-length').exec(function (err, adventure) {});
*
* // passing options (in this case return the raw js objects, not mongoose documents by passing `lean`
* Adventure.findById(id, 'name', { lean: true }, function (err, doc) {});
*
* // same as above
* Adventure.findById(id, 'name').lean().exec(function (err, doc) {});
*
* @param {ObjectId|HexId} id objectid, or a value that can be casted to one
* @param {Object} [fields] optional fields to select
* @param {Object} [options] optional
* @param {Function} [callback]
* @return {Query}
* @see field selection #query_Query-select
* @see lean queries #query_Query-lean
* @api public
*/
Model.findById = function findById (id, fields, options, callback) {
return this.findOne({ _id: id }, fields, options, callback);
};
/**
* Finds one document.
*
* The `conditions` are cast to their respective SchemaTypes before the command is sent.
*
* ####Example:
*
* // find one iphone adventures - iphone adventures??
* Adventure.findOne({ type: 'iphone' }, function (err, adventure) {});
*
* // same as above
* Adventure.findOne({ type: 'iphone' }).exec(function (err, adventure) {});
*
* // select only the adventures name
* Adventure.findOne({ type: 'iphone' }, 'name', function (err, adventure) {});
*
* // same as above
* Adventure.findOne({ type: 'iphone' }, 'name').exec(function (err, adventure) {});
*
* // specify options, in this case lean
* Adventure.findOne({ type: 'iphone' }, 'name', { lean: true }, callback);
*
* // same as above
* Adventure.findOne({ type: 'iphone' }, 'name', { lean: true }).exec(callback);
*
* // chaining findOne queries (same as above)
* Adventure.findOne({ type: 'iphone' }).select('name').lean().exec(callback);
*
* @param {Object} conditions
* @param {Object} [fields] optional fields to select
* @param {Object} [options] optional
* @param {Function} [callback]
* @return {Query}
* @see field selection #query_Query-select
* @see lean queries #query_Query-lean
* @api public
*/
Model.findOne = function findOne (conditions, fields, options, callback) {
if ('function' == typeof options) {
callback = options;
options = null;
} else if ('function' == typeof fields) {
callback = fields;
fields = null;
options = null;
} else if ('function' == typeof conditions) {
callback = conditions;
conditions = {};
fields = null;
options = null;
}
// get the mongodb collection object
var mq = new Query({}, options, this, this.collection);
mq.select(fields);
return mq.findOne(conditions, callback);
};
/**
* Counts number of matching documents in a database collection.
*
* ####Example:
*
* Adventure.count({ type: 'jungle' }, function (err, count) {
* if (err) ..
* console.log('there are %d jungle adventures', count);
* });
*
* @param {Object} conditions
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.count = function count (conditions, callback) {
if ('function' === typeof conditions)
callback = conditions, conditions = {};
// get the mongodb collection object
var mq = new Query({}, {}, this, this.collection);
return mq.count(conditions, callback);
};
/**
* Creates a Query for a `distinct` operation.
*
* Passing a `callback` immediately executes the query.
*
* ####Example
*
* Link.distinct('url', { clicks: {$gt: 100}}, function (err, result) {
* if (err) return handleError(err);
*
* assert(Array.isArray(result));
* console.log('unique urls with more than 100 clicks', result);
* })
*
* var query = Link.distinct('url');
* query.exec(callback);
*
* @param {String} field
* @param {Object} [conditions] optional
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.distinct = function distinct (field, conditions, callback) {
// get the mongodb collection object
var mq = new Query({}, {}, this, this.collection);
if ('function' == typeof conditions) {
callback = conditions;
conditions = {};
}
return mq.distinct(conditions, field, callback);
};
/**
* Creates a Query, applies the passed conditions, and returns the Query.
*
* For example, instead of writing:
*
* User.find({age: {$gte: 21, $lte: 65}}, callback);
*
* we can instead write:
*
* User.where('age').gte(21).lte(65).exec(callback);
*
* Since the Query class also supports `where` you can continue chaining
*
* User
* .where('age').gte(21).lte(65)
* .where('name', /^b/i)
* ... etc
*
* @param {String} path
* @param {Object} [val] optional value
* @return {Query}
* @api public
*/
Model.where = function where (path, val) {
// get the mongodb collection object
var mq = new Query({}, {}, this, this.collection).find({});
return mq.where.apply(mq, arguments);
};
/**
* Creates a `Query` and specifies a `$where` condition.
*
* Sometimes you need to query for things in mongodb using a JavaScript expression. You can do so via `find({ $where: javascript })`, or you can use the mongoose shortcut method $where via a Query chain or from your mongoose Model.
*
* Blog.$where('this.comments.length > 5').exec(function (err, docs) {});
*
* @param {String|Function} argument is a javascript string or anonymous function
* @method $where
* @memberOf Model
* @return {Query}
* @see Query.$where #query_Query-%24where
* @api public
*/
Model.$where = function $where () {
var mq = new Query({}, {}, this, this.collection).find({});
return mq.$where.apply(mq, arguments);
};
/**
* Issues a mongodb findAndModify update command.
*
* Finds a matching document, updates it according to the `update` arg, passing any `options`, and returns the found document (if any) to the callback. The query executes immediately if `callback` is passed else a Query object is returned.
*
* ####Options:
*
* - `new`: bool - true to return the modified document rather than the original. defaults to true
* - `upsert`: bool - creates the object if it doesn't exist. defaults to false.
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
* - `select`: sets the document fields to return
*
* ####Examples:
*
* A.findOneAndUpdate(conditions, update, options, callback) // executes
* A.findOneAndUpdate(conditions, update, options) // returns Query
* A.findOneAndUpdate(conditions, update, callback) // executes
* A.findOneAndUpdate(conditions, update) // returns Query
* A.findOneAndUpdate() // returns Query
*
* ####Note:
*
* All top level update keys which are not `atomic` operation names are treated as set operations:
*
* ####Example:
*
* var query = { name: 'borne' };
* Model.findOneAndUpdate(query, { name: 'jason borne' }, options, callback)
*
* // is sent as
* Model.findOneAndUpdate(query, { $set: { name: 'jason borne' }}, options, callback)
*
* This helps prevent accidentally overwriting your document with `{ name: 'jason borne' }`.
*
* ####Note:
*
* Although values are cast to their appropriate types when using the findAndModify helpers, the following are *not* applied:
*
* - defaults
* - setters
* - validators
* - middleware
*
* If you need those features, use the traditional approach of first retrieving the document.
*
* Model.findOne({ name: 'borne' }, function (err, doc) {
* if (err) ..
* doc.name = 'jason borne';
* doc.save(callback);
* })
*
* @param {Object} [conditions]
* @param {Object} [update]
* @param {Object} [options]
* @param {Function} [callback]
* @return {Query}
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
* @api public
*/
Model.findOneAndUpdate = function (conditions, update, options, callback) {
if ('function' == typeof options) {
callback = options;
options = null;
}
else if (1 === arguments.length) {
if ('function' == typeof conditions) {
var msg = 'Model.findOneAndUpdate(): First argument must not be a function.\n\n'
+ ' ' + this.modelName + '.findOneAndUpdate(conditions, update, options, callback)\n'
+ ' ' + this.modelName + '.findOneAndUpdate(conditions, update, options)\n'
+ ' ' + this.modelName + '.findOneAndUpdate(conditions, update)\n'
+ ' ' + this.modelName + '.findOneAndUpdate(update)\n'
+ ' ' + this.modelName + '.findOneAndUpdate()\n';
throw new TypeError(msg)
}
update = conditions;
conditions = undefined;
}
var fields;
if (options && options.fields) {
fields = options.fields;
options.fields = undefined;
}
var mq = new Query({}, {}, this, this.collection);
mq.select(fields);
return mq.findOneAndUpdate(conditions, update, options, callback);
}
/**
* Issues a mongodb findAndModify update command by a document's id.
*
* Finds a matching document, updates it according to the `update` arg, passing any `options`, and returns the found document (if any) to the callback. The query executes immediately if `callback` is passed else a Query object is returned.
*
* ####Options:
*
* - `new`: bool - true to return the modified document rather than the original. defaults to true
* - `upsert`: bool - creates the object if it doesn't exist. defaults to false.
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
* - `select`: sets the document fields to return
*
* ####Examples:
*
* A.findByIdAndUpdate(id, update, options, callback) // executes
* A.findByIdAndUpdate(id, update, options) // returns Query
* A.findByIdAndUpdate(id, update, callback) // executes
* A.findByIdAndUpdate(id, update) // returns Query
* A.findByIdAndUpdate() // returns Query
*
* ####Note:
*
* All top level update keys which are not `atomic` operation names are treated as set operations:
*
* ####Example:
*
* Model.findByIdAndUpdate(id, { name: 'jason borne' }, options, callback)
*
* // is sent as
* Model.findByIdAndUpdate(id, { $set: { name: 'jason borne' }}, options, callback)
*
* This helps prevent accidentally overwriting your document with `{ name: 'jason borne' }`.
*
* ####Note:
*
* Although values are cast to their appropriate types when using the findAndModify helpers, the following are *not* applied:
*
* - defaults
* - setters
* - validators
* - middleware
*
* If you need those features, use the traditional approach of first retrieving the document.
*
* Model.findById(id, function (err, doc) {
* if (err) ..
* doc.name = 'jason borne';
* doc.save(callback);
* })
*
* @param {ObjectId|HexId} id an ObjectId or string that can be cast to one.
* @param {Object} [update]
* @param {Object} [options]
* @param {Function} [callback]
* @return {Query}
* @see Model.findOneAndUpdate #model_Model.findOneAndUpdate
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
* @api public
*/
Model.findByIdAndUpdate = function (id, update, options, callback) {
var args;
if (1 === arguments.length) {
if ('function' == typeof id) {
var msg = 'Model.findByIdAndUpdate(): First argument must not be a function.\n\n'
+ ' ' + this.modelName + '.findByIdAndUpdate(id, callback)\n'
+ ' ' + this.modelName + '.findByIdAndUpdate(id)\n'
+ ' ' + this.modelName + '.findByIdAndUpdate()\n';
throw new TypeError(msg)
}
return this.findOneAndUpdate({_id: id }, undefined);
}
args = utils.args(arguments, 1);
// if a model is passed in instead of an id
if (id && id._id) {
id = id._id;
}
if (id) {
args.unshift({ _id: id });
}
return this.findOneAndUpdate.apply(this, args);
}
/**
* Issue a mongodb findAndModify remove command.
*
* Finds a matching document, removes it, passing the found document (if any) to the callback.
*
* Executes immediately if `callback` is passed else a Query object is returned.
*
* ####Options:
*
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
* - `select`: sets the document fields to return
*
* ####Examples:
*
* A.findOneAndRemove(conditions, options, callback) // executes
* A.findOneAndRemove(conditions, options) // return Query
* A.findOneAndRemove(conditions, callback) // executes
* A.findOneAndRemove(conditions) // returns Query
* A.findOneAndRemove() // returns Query
*
* Although values are cast to their appropriate types when using the findAndModify helpers, the following are *not* applied:
*
* - defaults
* - setters
* - validators
* - middleware
*
* If you need those features, use the traditional approach of first retrieving the document.
*
* Model.findById(id, function (err, doc) {
* if (err) ..
* doc.remove(callback);
* })
*
* @param {Object} conditions
* @param {Object} [options]
* @param {Function} [callback]
* @return {Query}
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
* @api public
*/
Model.findOneAndRemove = function (conditions, options, callback) {
if (1 === arguments.length && 'function' == typeof conditions) {
var msg = 'Model.findOneAndRemove(): First argument must not be a function.\n\n'
+ ' ' + this.modelName + '.findOneAndRemove(conditions, callback)\n'
+ ' ' + this.modelName + '.findOneAndRemove(conditions)\n'
+ ' ' + this.modelName + '.findOneAndRemove()\n';
throw new TypeError(msg)
}
if ('function' == typeof options) {
callback = options;
options = undefined;
}
var fields;
if (options) {
fields = options.select;
options.select = undefined;
}
var mq = new Query({}, {}, this, this.collection);
mq.select(fields);
return mq.findOneAndRemove(conditions, options, callback);
}
/**
* Issue a mongodb findAndModify remove command by a document's id.
*
* Finds a matching document, removes it, passing the found document (if any) to the callback.
*
* Executes immediately if `callback` is passed, else a `Query` object is returned.
*
* ####Options:
*
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
* - `select`: sets the document fields to return
*
* ####Examples:
*
* A.findByIdAndRemove(id, options, callback) // executes
* A.findByIdAndRemove(id, options) // return Query
* A.findByIdAndRemove(id, callback) // executes
* A.findByIdAndRemove(id) // returns Query
* A.findByIdAndRemove() // returns Query
*
* @param {ObjectId|HexString} id ObjectId or string that can be cast to one
* @param {Object} [options]
* @param {Function} [callback]
* @return {Query}
* @see Model.findOneAndRemove #model_Model.findOneAndRemove
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
*/
Model.findByIdAndRemove = function (id, options, callback) {
if (1 === arguments.length && 'function' == typeof id) {
var msg = 'Model.findByIdAndRemove(): First argument must not be a function.\n\n'
+ ' ' + this.modelName + '.findByIdAndRemove(id, callback)\n'
+ ' ' + this.modelName + '.findByIdAndRemove(id)\n'
+ ' ' + this.modelName + '.findByIdAndRemove()\n';
throw new TypeError(msg)
}
return this.findOneAndRemove({ _id: id }, options, callback);
}
/**
* Shortcut for creating a new Document that is automatically saved to the db if valid.
*
* ####Example:
*
* // pass individual docs
* Candy.create({ type: 'jelly bean' }, { type: 'snickers' }, function (err, jellybean, snickers) {
* if (err) // ...
* });
*
* // pass an array
* var array = [{ type: 'jelly bean' }, { type: 'snickers' }];
* Candy.create(array, function (err, jellybean, snickers) {
* if (err) // ...
* });
*
* // callback is optional; use the returned promise if you like:
* var promise = Candy.create({ type: 'jawbreaker' });
* promise.then(function (jawbreaker) {
* // ...
* })
*
* @param {Array|Object...} doc(s)
* @param {Function} [fn] callback
* @return {Promise}
* @api public
*/
Model.create = function create (doc, fn) {
var promise = new Promise
, args
if (Array.isArray(doc)) {
args = doc;
if ('function' == typeof fn) {
promise.onResolve(fn);
}
} else {
var last = arguments[arguments.length - 1];
if ('function' == typeof last) {
promise.onResolve(last);
args = utils.args(arguments, 0, arguments.length - 1);
} else {
args = utils.args(arguments);
}
}
var count = args.length;
if (0 === count) {
promise.complete();
return promise;
}
var self = this;
var docs = [];
args.forEach(function (arg, i) {
var doc = new self(arg);
docs[i] = doc;
doc.save(function (err) {
if (err) return promise.error(err);
--count || promise.complete.apply(promise, docs);
});
});
return promise;
};
/**
* Updates documents in the database without returning them.
*
* ####Examples:
*
* MyModel.update({ age: { $gt: 18 } }, { oldEnough: true }, fn);
* MyModel.update({ name: 'Tobi' }, { ferret: true }, { multi: true }, function (err, numberAffected, raw) {
* if (err) return handleError(err);
* console.log('The number of updated documents was %d', numberAffected);
* console.log('The raw response from Mongo was ', raw);
* });
*
* ####Valid options:
*
* - `safe` (boolean) safe mode (defaults to value set in schema (true))
* - `upsert` (boolean) whether to create the doc if it doesn't match (false)
* - `multi` (boolean) whether multiple documents should be updated (false)
* - `strict` (boolean) overrides the `strict` option for this update
*
* All `update` values are cast to their appropriate SchemaTypes before being sent.
*
* The `callback` function receives `(err, numberAffected, rawResponse)`.
*
* - `err` is the error if any occurred
* - `numberAffected` is the count of updated documents Mongo reported
* - `rawResponse` is the full response from Mongo
*
* ####Note:
*
* All top level keys which are not `atomic` operation names are treated as set operations:
*
* ####Example:
*
* var query = { name: 'borne' };
* Model.update(query, { name: 'jason borne' }, options, callback)
*
* // is sent as
* Model.update(query, { $set: { name: 'jason borne' }}, options, callback)
*
* This helps prevent accidentally overwriting all documents in your collection with `{ name: 'jason borne' }`.
*
* ####Note:
*
* Be careful to not use an existing model instance for the update clause (this won't work and can cause weird behavior like infinite loops). Also, ensure that the update clause does not have an _id property, which causes Mongo to return a "Mod on _id not allowed" error.
*
* ####Note:
*
* To update documents without waiting for a response from MongoDB, do not pass a `callback`, then call `exec` on the returned [Query](#query-js):
*
* Comment.update({ _id: id }, { $set: { text: 'changed' }}).exec();
*
* ####Note:
*
* Although values are cast to their appropriate types when using update, the following are *not* applied:
*
* - defaults
* - setters
* - validators
* - middleware
*
* If you need those features, use the traditional approach of first retrieving the document.
*
* Model.findOne({ name: 'borne' }, function (err, doc) {
* if (err) ..
* doc.name = 'jason borne';
* doc.save(callback);
* })
*
* @see strict schemas http://mongoosejs.com/docs/guide.html#strict
* @param {Object} conditions
* @param {Object} update
* @param {Object} [options]
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.update = function update (conditions, doc, options, callback) {
var mq = new Query({}, {}, this, this.collection);
return mq.update(conditions, doc, options, callback);
};
/**
* Executes a mapReduce command.
*
* `o` is an object specifying all mapReduce options as well as the map and reduce functions. All options are delegated to the driver implementation.
*
* ####Example:
*
* var o = {};
* o.map = function () { emit(this.name, 1) }
* o.reduce = function (k, vals) { return vals.length }
* User.mapReduce(o, function (err, results) {
* console.log(results)
* })
*
* ####Other options:
*
* - `query` {Object} query filter object.
* - `limit` {Number} max number of documents
* - `keeptemp` {Boolean, default:false} keep temporary data
* - `finalize` {Function} finalize function
* - `scope` {Object} scope variables exposed to map/reduce/finalize during execution
* - `jsMode` {Boolean, default:false} it is possible to make the execution stay in JS. Provided in MongoDB > 2.0.X
* - `verbose` {Boolean, default:false} provide statistics on job execution time.
* - `out*` {Object, default: {inline:1}} sets the output target for the map reduce job.
*
* ####* out options:
*
* - `{inline:1}` the results are returned in an array
* - `{replace: 'collectionName'}` add the results to collectionName: the results replace the collection
* - `{reduce: 'collectionName'}` add the results to collectionName: if dups are detected, uses the reducer / finalize functions
* - `{merge: 'collectionName'}` add the results to collectionName: if dups exist the new docs overwrite the old
*
* If `options.out` is set to `replace`, `merge`, or `reduce`, a Model instance is returned that can be used for further querying. Queries run against this model are all executed with the `lean` option; meaning only the js object is returned and no Mongoose magic is applied (getters, setters, etc).
*
* ####Example:
*
* var o = {};
* o.map = function () { emit(this.name, 1) }
* o.reduce = function (k, vals) { return vals.length }
* o.out = { replace: 'createdCollectionNameForResults' }
* o.verbose = true;
*
* User.mapReduce(o, function (err, model, stats) {
* console.log('map reduce took %d ms', stats.processtime)
* model.find().where('value').gt(10).exec(function (err, docs) {
* console.log(docs);
* });
* })
*
* // a promise is returned so you may instead write
* var promise = User.mapReduce(o);
* promise.then(function (model, stats) {
* console.log('map reduce took %d ms', stats.processtime)
* return model.find().where('value').gt(10).exec();
* }).then(function (docs) {
* console.log(docs);
* }).then(null, handleError).end()
*
* @param {Object} o an object specifying map-reduce options
* @param {Function} [callback] optional callback
* @see http://www.mongodb.org/display/DOCS/MapReduce
* @return {Promise}
* @api public
*/
Model.mapReduce = function mapReduce (o, callback) {
var promise = new Promise(callback);
var self = this;
if (!Model.mapReduce.schema) {
var opts = { noId: true, noVirtualId: true, strict: false }
Model.mapReduce.schema = new Schema({}, opts);
}
if (!o.out) o.out = { inline: 1 };
o.map = String(o.map);
o.reduce = String(o.reduce);
if (o.query) {
var q = new Query(o.query);
q.cast(this);
o.query = q._conditions;
q = undefined;
}
this.collection.mapReduce(null, null, o, function (err, ret, stats) {
if (err) return promise.error(err);
if (ret.findOne && ret.mapReduce) {
// returned a collection, convert to Model
var model = Model.compile(
'_mapreduce_' + ret.collectionName
, Model.mapReduce.schema
, ret.collectionName
, self.db
, self.base);
model._mapreduce = true;
return promise.fulfill(model, stats);
}
promise.fulfill(ret, stats);
});
return promise;
}
/**
* geoNear support for Mongoose
*
* ####Options:
* - `lean` {Boolean} return the raw object
* - All options supported by the driver are also supported
*
* ####Example:
*
* // Legacy point
* Model.geoNear([1,3], { maxDistance : 5, spherical : true }, function(err, results, stats) {
* console.log(results);
* });
*
* // geoJson
* var point = { type : "Point", coordinates : [9,9] };
* Model.geoNear(point, { maxDistance : 5, spherical : true }, function(err, results, stats) {
* console.log(results);
* });
*
 * @param {Object|Array} near GeoJSON point or legacy coordinate pair [x,y] to search near
 * @param {Object} options for the query
* @param {Function} [callback] optional callback for the query
* @return {Promise}
* @see http://docs.mongodb.org/manual/core/2dsphere/
* @see http://mongodb.github.io/node-mongodb-native/api-generated/collection.html?highlight=geonear#geoNear
* @api public
*/
Model.geoNear = function (near, options, callback) {
if ('function' == typeof options) {
callback = options;
options = {};
}
var promise = new Promise(callback);
if (!near) {
promise.error(new Error("Must pass a near option to geoNear"));
return promise;
}
var x,y;
if (Array.isArray(near)) {
if (near.length != 2) {
promise.error(new Error("If using legacy coordinates, must be an array of size 2 for geoNear"));
return promise;
}
x = near[0];
y = near[1];
} else {
if (near.type != "Point" || !Array.isArray(near.coordinates)) {
promise.error(new Error("Must pass either a legacy coordinate array or GeoJSON Point to geoNear"));
return promise;
}
x = near.coordinates[0];
y = near.coordinates[1];
}
var self = this;
this.collection.geoNear(x, y, options, function (err, res) {
if (err) return promise.error(err);
if (options.lean) return promise.fulfill(res.results, res.stats);
var count = res.results.length;
// if there are no results, fulfill the promise now
if (count == 0) {
return promise.fulfill(res.results, res.stats);
}
var errSeen = false;
for (var i=0; i < res.results.length; i++) {
var temp = res.results[i].obj;
res.results[i].obj = new self();
res.results[i].obj.init(temp, function (err) {
if (err && !errSeen) {
errSeen = true;
return promise.error(err);
}
--count || promise.fulfill(res.results, res.stats);
});
}
});
return promise;
};
/**
* Performs [aggregations](http://docs.mongodb.org/manual/applications/aggregation/) on the models collection.
*
* If a `callback` is passed, the `aggregate` is executed and a `Promise` is returned. If a callback is not passed, the `aggregate` itself is returned.
*
* ####Example:
*
* // Find the max balance of all accounts
* Users.aggregate(
* { $group: { _id: null, maxBalance: { $max: '$balance' }}}
* , { $project: { _id: 0, maxBalance: 1 }}
* , function (err, res) {
* if (err) return handleError(err);
* console.log(res); // [ { maxBalance: 98000 } ]
* });
*
* // Or use the aggregation pipeline builder.
* Users.aggregate()
* .group({ _id: null, maxBalance: { $max: '$balance' } })
 *       .select('-_id maxBalance')
* .exec(function (err, res) {
* if (err) return handleError(err);
 *         console.log(res); // [ { maxBalance: 98000 } ]
* });
*
* ####NOTE:
*
* - Arguments are not cast to the model's schema because `$project` operators allow redefining the "shape" of the documents at any stage of the pipeline, which may leave documents in an incompatible format.
* - The documents returned are plain javascript objects, not mongoose documents (since any shape of document can be returned).
* - Requires MongoDB >= 2.1
*
* @see Aggregate #aggregate_Aggregate
* @see MongoDB http://docs.mongodb.org/manual/applications/aggregation/
* @param {Object|Array} [...] aggregation pipeline operator(s) or operator array
* @param {Function} [callback]
* @return {Aggregate|Promise}
* @api public
*/
Model.aggregate = function aggregate () {
var args = [].slice.call(arguments)
, aggregate
, callback;
if ('function' === typeof args[args.length - 1]) {
callback = args.pop();
}
if (1 === args.length && util.isArray(args[0])) {
aggregate = new Aggregate(args[0]);
} else {
aggregate = new Aggregate(args);
}
aggregate.bind(this);
if ('undefined' === typeof callback) {
return aggregate;
}
return aggregate.exec(callback);
}
/**
* Implements `$geoSearch` functionality for Mongoose
*
* ####Example:
*
* var options = { near: [10, 10], maxDistance: 5 };
* Locations.geoSearch({ type : "house" }, options, function(err, res) {
* console.log(res);
* });
*
* ####Options:
* - `near` {Array} x,y point to search for
 * - `maxDistance` {Number} the maximum distance from the `near` point that a result can be
* - `limit` {Number} The maximum number of results to return
* - `lean` {Boolean} return the raw object instead of the Mongoose Model
*
 * @param {Object} conditions an object that specifies the match condition (required)
* @param {Object} options for the geoSearch, some (near, maxDistance) are required
* @param {Function} [callback] optional callback
* @return {Promise}
* @see http://docs.mongodb.org/manual/reference/command/geoSearch/
* @see http://docs.mongodb.org/manual/core/geohaystack/
* @api public
*/
Model.geoSearch = function (conditions, options, callback) {
if ('function' == typeof options) {
callback = options;
options = {};
}
var promise = new Promise(callback);
if (conditions == undefined || !utils.isObject(conditions)) {
return promise.error(new Error("Must pass conditions to geoSearch"));
}
if (!options.near) {
return promise.error(new Error("Must specify the near option in geoSearch"));
}
if (!Array.isArray(options.near)) {
return promise.error(new Error("near option must be an array [x, y]"));
}
// send the conditions in the options object
options.search = conditions;
var self = this;
this.collection.geoHaystackSearch(options.near[0], options.near[1], options, function (err, res) {
// have to deal with driver problem. Should be fixed in a soon-ish release
// (7/8/2013)
if (err || res.errmsg) {
if (!err) err = new Error(res.errmsg);
if (res && res.code !== undefined) err.code = res.code;
return promise.error(err);
}
if (options.lean) return promise.fulfill(res.results, res.stats);
var count = res.results.length;
var errSeen = false;
for (var i=0; i < res.results.length; i++) {
var temp = res.results[i];
res.results[i] = new self();
res.results[i].init(temp, {}, function (err) {
if (err && !errSeen) {
errSeen = true;
return promise.error(err);
}
--count || (!errSeen && promise.fulfill(res.results, res.stats));
});
}
});
return promise;
};
/**
* Populates document references.
*
* ####Available options:
*
* - path: space delimited path(s) to populate
* - select: optional fields to select
* - match: optional query conditions to match
* - model: optional name of the model to use for population
* - options: optional query options like sort, limit, etc
*
* ####Examples:
*
* // populates a single object
* User.findById(id, function (err, user) {
* var opts = [
* { path: 'company', match: { x: 1 }, select: 'name' }
* , { path: 'notes', options: { limit: 10 }, model: 'override' }
* ]
*
* User.populate(user, opts, function (err, user) {
* console.log(user);
* })
* })
*
* // populates an array of objects
* User.find(match, function (err, users) {
* var opts = [{ path: 'company', match: { x: 1 }, select: 'name' }]
*
* var promise = User.populate(users, opts);
* promise.then(console.log).end();
* })
*
* // imagine a Weapon model exists with two saved documents:
* // { _id: 389, name: 'whip' }
* // { _id: 8921, name: 'boomerang' }
*
* var user = { name: 'Indiana Jones', weapon: 389 }
* Weapon.populate(user, { path: 'weapon', model: 'Weapon' }, function (err, user) {
* console.log(user.weapon.name) // whip
* })
*
* // populate many plain objects
* var users = [{ name: 'Indiana Jones', weapon: 389 }]
* users.push({ name: 'Batman', weapon: 8921 })
* Weapon.populate(users, { path: 'weapon' }, function (err, users) {
* users.forEach(function (user) {
 *         console.log('%s uses a %s', user.name, user.weapon.name)
* // Indiana Jones uses a whip
* // Batman uses a boomerang
* })
* })
* // Note that we didn't need to specify the Weapon model because
 *     // we were already using its populate() method.
*
* @param {Document|Array} docs Either a single document or array of documents to populate.
* @param {Object} options A hash of key/val (path, options) used for population.
* @param {Function} [cb(err,doc)] Optional callback, executed upon completion. Receives `err` and the `doc(s)`.
* @return {Promise}
* @api public
*/
Model.populate = function (docs, paths, cb) {
var promise = new Promise(cb);
// always resolve on nextTick for consistent async behavior
function resolve () {
var args = utils.args(arguments);
process.nextTick(function () {
promise.resolve.apply(promise, args);
});
}
// normalized paths
var paths = utils.populate(paths);
var pending = paths.length;
if (0 === pending) {
resolve(null, docs);
return promise;
}
// each path has its own query options and must be executed separately
var i = pending;
var path;
while (i--) {
path = paths[i];
populate(this, docs, path, next);
}
return promise;
function next (err) {
if (err) return resolve(err);
if (--pending) return;
resolve(null, docs);
}
}
/*!
* Populates `docs`
*/
function populate (model, docs, options, cb) {
var select = options.select
, match = options.match
, path = options.path
var schema = model._getSchema(path);
var subpath;
// handle document arrays
if (schema && schema.caster) {
schema = schema.caster;
}
// model name for the populate query
var modelName = options.model && options.model.modelName
|| options.model // query options
|| schema && schema.options.ref // declared in schema
|| model.modelName // an ad-hoc structure
var Model = model.db.model(modelName);
// expose the model used
options.model = Model;
// normalize single / multiple docs passed
if (!Array.isArray(docs)) {
docs = [docs];
}
if (0 === docs.length || docs.every(utils.isNullOrUndefined)) {
return cb();
}
var rawIds = [];
var i, doc, id;
var len = docs.length;
var ret;
var found = 0;
var isDocument;
for (i = 0; i < len; i++) {
ret = undefined;
doc = docs[i];
id = String(utils.getValue("_id", doc));
isDocument = !! doc.$__;
if (isDocument && !doc.isModified(path)) {
// it is possible a previously populated path is being
// populated again. Because users can specify matcher
// clauses in their populate arguments we use the cached
// _ids from the original populate call to ensure all _ids
// are looked up, but only if the path wasn't modified which
// signifies the users intent of the state of the path.
ret = doc.populated(path);
}
if (!ret || Array.isArray(ret) && 0 === ret.length) {
ret = utils.getValue(path, doc);
}
if (ret) {
ret = convertTo_id(ret);
// previously we always assigned this even if the document had no _id
options._docs[id] = Array.isArray(ret)
? ret.slice()
: ret;
}
// always retain original values, even empty values. these are
// used to map the query results back to the correct position.
rawIds.push(ret);
if (isDocument) {
// cache original populated _ids and model used
doc.populated(path, options._docs[id], options);
}
}
var ids = utils.array.flatten(rawIds, function (item) {
// no need to include undefined values in our query
return undefined !== item;
});
if (0 === ids.length || ids.every(utils.isNullOrUndefined)) {
return cb();
}
// preserve original match conditions by copying
if (match) {
match = utils.object.shallowCopy(match);
} else {
match = {};
}
match._id || (match._id = { $in: ids });
var assignmentOpts = {};
assignmentOpts.sort = options.options && options.options.sort || undefined;
assignmentOpts.excludeId = /\s?-_id\s?/.test(select) || (select && 0 === select._id);
if (assignmentOpts.excludeId) {
// override the exclusion from the query so we can use the _id
// for document matching during assignment. we'll delete the
// _id back off before returning the result.
if ('string' == typeof select) {
select = select.replace(/\s?-_id\s?/g, ' ');
} else {
// preserve original select conditions by copying
select = utils.object.shallowCopy(select);
delete select._id;
}
}
// if a limit option is passed, we should have the limit apply to *each*
// document, not apply in the aggregate
if (options.options && options.options.limit) {
options.options.limit = options.options.limit * len;
}
Model.find(match, select, options.options, function (err, vals) {
if (err) return cb(err);
var lean = options.options && options.options.lean;
var len = vals.length;
var rawOrder = {};
var rawDocs = {}
var key;
var val;
// optimization:
// record the document positions as returned by
// the query result.
for (var i = 0; i < len; i++) {
val = vals[i];
key = String(utils.getValue('_id', val));
rawDocs[key] = val;
rawOrder[key] = i;
// flag each as result of population
if (!lean) val.$__.wasPopulated = true;
}
assignVals({
rawIds: rawIds,
rawDocs: rawDocs,
rawOrder: rawOrder,
docs: docs,
path: path,
options: assignmentOpts
});
cb();
});
}
/*!
* Retrieve the _id of `val` if a Document or Array of Documents.
*
* @param {Array|Document|Any} val
* @return {Array|Document|Any}
*/
function convertTo_id (val) {
if (val instanceof Model) return val._id;
if (Array.isArray(val)) {
for (var i = 0; i < val.length; ++i) {
if (val[i] instanceof Model) {
val[i] = val[i]._id;
}
}
return val;
}
return val;
}
/*!
* Assigns documents returned from a population query back
* to the original document path.
*/
function assignVals (o) {
// replace the original ids in our intermediate _ids structure
// with the documents found by query
assignRawDocsToIdStructure(o.rawIds, o.rawDocs, o.rawOrder, o.options);
// now update the original documents being populated using the
// result structure that contains real documents.
var docs = o.docs;
var path = o.path;
var rawIds = o.rawIds;
var options = o.options;
for (var i = 0; i < docs.length; ++i) {
utils.setValue(path, rawIds[i], docs[i], function (val) {
return valueFilter(val, options);
});
}
}
/*!
* 1) Apply backwards compatible find/findOne behavior to sub documents
*
* find logic:
* a) filter out non-documents
* b) remove _id from sub docs when user specified
*
* findOne
* a) if no doc found, set to null
* b) remove _id from sub docs when user specified
*
* 2) Remove _ids when specified by users query.
*
* background:
* _ids are left in the query even when user excludes them so
* that population mapping can occur.
*/
function valueFilter (val, assignmentOpts) {
if (Array.isArray(val)) {
// find logic
var ret = [];
for (var i = 0; i < val.length; ++i) {
var subdoc = val[i];
if (!isDoc(subdoc)) continue;
maybeRemoveId(subdoc, assignmentOpts);
ret.push(subdoc);
}
return ret;
}
// findOne
if (isDoc(val)) {
maybeRemoveId(val, assignmentOpts);
return val;
}
return null;
}
/*!
* Remove _id from `subdoc` if user specified "lean" query option
*/
function maybeRemoveId (subdoc, assignmentOpts) {
if (assignmentOpts.excludeId) {
if ('function' == typeof subdoc.setValue) {
subdoc.setValue('_id', undefined);
} else {
delete subdoc._id;
}
}
}
/*!
* Determine if `doc` is a document returned
* by a populate query.
*/
function isDoc (doc) {
if (null == doc)
return false;
var type = typeof doc;
if ('string' == type)
return false;
if ('number' == type)
return false;
if (Buffer.isBuffer(doc))
return false;
if ('ObjectID' == doc.constructor.name)
return false;
// only docs
return true;
}
/*!
* Assign `vals` returned by mongo query to the `rawIds`
* structure returned from utils.getVals() honoring
* query sort order if specified by user.
*
* This can be optimized.
*
* Rules:
*
* if the value of the path is not an array, use findOne rules, else find.
* for findOne the results are assigned directly to doc path (including null results).
* for find, if user specified sort order, results are assigned directly
* else documents are put back in original order of array if found in results
*
* @param {Array} rawIds
* @param {Array} vals
* @param {Boolean} sort
* @api private
*/
function assignRawDocsToIdStructure (rawIds, resultDocs, resultOrder, options, recursed) {
// honor user specified sort order
var newOrder = [];
var sorting = options.sort && rawIds.length > 1;
var found;
var doc;
var sid;
var id;
for (var i = 0; i < rawIds.length; ++i) {
id = rawIds[i];
if (Array.isArray(id)) {
// handle [ [id0, id2], [id3] ]
assignRawDocsToIdStructure(id, resultDocs, resultOrder, options, true);
newOrder.push(id);
continue;
}
if (null === id && !sorting) {
// keep nulls for findOne unless sorting, which always
// removes them (backward compat)
newOrder.push(id);
continue;
}
sid = String(id);
found = false;
if (recursed) {
// apply find behavior
// assign matching documents in original order unless sorting
doc = resultDocs[sid];
if (doc) {
if (sorting) {
newOrder[resultOrder[sid]] = doc;
} else {
newOrder.push(doc);
}
} else {
newOrder.push(id);
}
} else {
// apply findOne behavior - if document in results, assign, else assign null
newOrder[i] = doc = resultDocs[sid] || null;
}
}
rawIds.length = 0;
if (newOrder.length) {
// reassign the documents based on corrected order
// forEach skips over sparse entries in arrays so we
// can safely use this to our advantage dealing with sorted
// result sets too.
newOrder.forEach(function (doc, i) {
rawIds[i] = doc;
});
}
}
/**
* Finds the schema for `path`. This is different than
* calling `schema.path` as it also resolves paths with
* positional selectors (something.$.another.$.path).
*
* @param {String} path
* @return {Schema}
* @api private
*/
Model._getSchema = function _getSchema (path) {
var schema = this.schema
, pathschema = schema.path(path);
if (pathschema)
return pathschema;
// look for arrays
return (function search (parts, schema) {
var p = parts.length + 1
, foundschema
, trypath
while (p--) {
trypath = parts.slice(0, p).join('.');
foundschema = schema.path(trypath);
if (foundschema) {
if (foundschema.caster) {
// array of Mixed?
if (foundschema.caster instanceof Types.Mixed) {
return foundschema.caster;
}
// Now that we found the array, we need to check if there
// are remaining document paths to look up for casting.
// Also we need to handle array.$.path since schema.path
// doesn't work for that.
// If there is no foundschema.schema we are dealing with
// a path like array.$
if (p !== parts.length && foundschema.schema) {
if ('$' === parts[p]) {
// comments.$.comments.$.title
return search(parts.slice(p+1), foundschema.schema);
} else {
// this is the last path of the selector
return search(parts.slice(p), foundschema.schema);
}
}
}
return foundschema;
}
}
})(path.split('.'), schema)
}
/*!
* Compiler utility.
*
* @param {String} name model name
* @param {Schema} schema
* @param {String} collectionName
* @param {Connection} connection
* @param {Mongoose} base mongoose instance
*/
Model.compile = function compile (name, schema, collectionName, connection, base) {
var versioningEnabled = false !== schema.options.versionKey;
if (versioningEnabled && !schema.paths[schema.options.versionKey]) {
// add versioning to top level documents only
var o = {};
o[schema.options.versionKey] = Number;
schema.add(o);
}
// generate new class
function model (doc, fields, skipId) {
if (!(this instanceof model))
return new model(doc, fields, skipId);
Model.call(this, doc, fields, skipId);
};
model.base = base;
model.modelName = name;
model.__proto__ = Model;
model.prototype.__proto__ = Model.prototype;
model.model = Model.prototype.model;
model.db = model.prototype.db = connection;
model.discriminators = model.prototype.discriminators = undefined;
model.prototype.$__setSchema(schema);
var collectionOptions = {
bufferCommands: schema.options.bufferCommands
, capped: schema.options.capped
};
model.prototype.collection = connection.collection(
collectionName
, collectionOptions
);
// apply methods
for (var i in schema.methods)
model.prototype[i] = schema.methods[i];
// apply statics
for (var i in schema.statics)
model[i] = schema.statics[i];
model.schema = model.prototype.schema;
model.options = model.prototype.options;
model.collection = model.prototype.collection;
return model;
};
/*!
* Subclass this model with `conn`, `schema`, and `collection` settings.
*
* @param {Connection} conn
* @param {Schema} [schema]
* @param {String} [collection]
* @return {Model}
*/
Model.__subclass = function subclass (conn, schema, collection) {
// subclass model using this connection and collection name
var model = this;
var Model = function Model (doc, fields, skipId) {
if (!(this instanceof Model)) {
return new Model(doc, fields, skipId);
}
model.call(this, doc, fields, skipId);
}
Model.__proto__ = model;
Model.prototype.__proto__ = model.prototype;
Model.db = Model.prototype.db = conn;
var s = schema && 'string' != typeof schema
? schema
: model.prototype.schema;
var options = s.options || {};
if (!collection) {
collection = model.prototype.schema.get('collection')
|| utils.toCollectionName(model.modelName, options);
}
var collectionOptions = {
bufferCommands: s ? options.bufferCommands : true
, capped: s && options.capped
};
Model.prototype.collection = conn.collection(collection, collectionOptions);
Model.collection = Model.prototype.collection;
Model.init();
return Model;
}
/*!
* Module exports.
*/
module.exports = exports = Model;
| 1 | 12,302 | fields may be an object. | Automattic-mongoose | js |
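The review note above ("fields may be an object.") presumably refers to a doc-comment parameter in this file that is described only as a string, even though the surrounding code also accepts the object form — `populate()` explicitly handles an object-valued `select` via its `0 === select._id` branch. A minimal, hedged illustration of that object form; the model and field names here are only for illustration:

    // Hedged sketch: `User`, `docs`, and the field names are hypothetical.
    // `select` may be an object using MongoDB projection syntax as well as a
    // space-delimited string.
    User.populate(docs, { path: 'company', select: { name: 1, _id: 0 } }, function (err, populated) {
      if (err) return handleError(err);
      // each populated `company` carries only `name`; `_id` was excluded via the object form
      console.log(populated);
    });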
@@ -4,12 +4,12 @@ import (
"bufio"
"encoding/json"
"fmt"
- "io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"sync"
+ "time"
"github.com/hashicorp/hcl"
"github.com/spiffe/spire/proto/agent/workloadattestor" | 1 | package k8s
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"sync"
"github.com/hashicorp/hcl"
"github.com/spiffe/spire/proto/agent/workloadattestor"
"github.com/spiffe/spire/proto/common"
spi "github.com/spiffe/spire/proto/common/plugin"
)
type k8sPlugin struct {
kubeletReadOnlyPort int
httpClient httpClient
fs fileSystem
mtx *sync.RWMutex
}
type k8sPluginConfig struct {
KubeletReadOnlyPort int `hcl:"kubelet_read_only_port"`
}
type podList struct {
// We only care about namespace, serviceAccountName and containerID
Metadata struct {
} `json:"metadata"`
Items []struct {
Metadata struct {
Namespace string `json:"namespace"`
} `json:"metadata"`
Spec struct {
ServiceAccountName string `json:"serviceAccountName"`
} `json:"spec"`
Status podStatus `json:"status"`
} `json:"items"`
}
type podStatus struct {
InitContainerStatuses []struct {
ContainerID string `json:"containerID"`
} `json:"initContainerStatuses"`
ContainerStatuses []struct {
ContainerID string `json:"containerID"`
} `json:"containerStatuses"`
}
const (
selectorType string = "k8s"
)
func (p *k8sPlugin) Attest(req *workloadattestor.AttestRequest) (*workloadattestor.AttestResponse, error) {
p.mtx.RLock()
defer p.mtx.RUnlock()
resp := workloadattestor.AttestResponse{}
cgroups, err := getCgroups(fmt.Sprintf("/proc/%v/cgroup", req.Pid), p.fs)
if err != nil {
return &resp, err
}
var containerID string
for _, cgroup := range cgroups {
// We are only interested in kube pods entries. Example entry:
// 11:hugetlb:/kubepods/burstable/pod2c48913c-b29f-11e7-9350-020968147796/9bca8d63d5fa610783847915bcff0ecac1273e5b4bed3f6fa1b07350e0135961
if len(cgroup[2]) < 9 {
continue
}
substring := cgroup[2][:9]
if substring == "/kubepods" {
parts := strings.Split(cgroup[2], "/")
if len(parts) < 5 {
log.Printf("Kube pod entry found, but without container id: %v", substring)
continue
}
containerID = parts[4]
break
}
}
// Not a Kubernetes pod
if containerID == "" {
return &resp, nil
}
httpResp, err := p.httpClient.Get(fmt.Sprintf("http://localhost:%v/pods", p.kubeletReadOnlyPort))
if err != nil {
return &resp, err
}
defer httpResp.Body.Close()
	respBytes, err := ioutil.ReadAll(httpResp.Body)
	if err != nil {
		return &resp, err
	}
	var podInfo *podList
	err = json.Unmarshal(respBytes, &podInfo)
if err != nil {
return &resp, err
}
for _, item := range podInfo.Items {
match, err := statusMatches(containerID, item.Status)
if err != nil {
return &resp, err
}
if match {
resp.Selectors = append(resp.Selectors, &common.Selector{Type: selectorType, Value: fmt.Sprintf("sa:%v", item.Spec.ServiceAccountName)})
resp.Selectors = append(resp.Selectors, &common.Selector{Type: selectorType, Value: fmt.Sprintf("ns:%v", item.Metadata.Namespace)})
return &resp, nil
}
}
return &resp, fmt.Errorf("no selectors found")
}
func statusMatches(containerID string, status podStatus) (bool, error) {
for _, status := range status.ContainerStatuses {
containerURL, err := url.Parse(status.ContainerID)
if err != nil {
return false, err
}
if containerID == containerURL.Host {
return true, nil
}
}
for _, status := range status.InitContainerStatuses {
containerURL, err := url.Parse(status.ContainerID)
if err != nil {
return false, err
}
if containerID == containerURL.Host {
return true, nil
}
}
return false, nil
}
func getCgroups(path string, fs fileSystem) (cgroups [][]string, err error) {
// http://man7.org/linux/man-pages/man7/cgroups.7.html
// https://www.kernel.org/doc/Documentation/cgroup-v2.txt
file, err := fs.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
token := scanner.Text()
substrings := strings.SplitN(token, ":", 3)
if len(substrings) < 3 {
return cgroups, fmt.Errorf("cgroup entry contains %v colons, but expected at least two colons: %v", len(substrings), token)
}
cgroups = append(cgroups, substrings)
}
return cgroups, err
}
func (p *k8sPlugin) Configure(req *spi.ConfigureRequest) (*spi.ConfigureResponse, error) {
p.mtx.Lock()
defer p.mtx.Unlock()
resp := &spi.ConfigureResponse{}
// Parse HCL config payload into config struct
config := &k8sPluginConfig{}
hclTree, err := hcl.Parse(req.Configuration)
if err != nil {
resp.ErrorList = []string{err.Error()}
return resp, err
}
err = hcl.DecodeObject(&config, hclTree)
if err != nil {
resp.ErrorList = []string{err.Error()}
return resp, err
}
// Set local vars from config struct
p.kubeletReadOnlyPort = config.KubeletReadOnlyPort
return &spi.ConfigureResponse{}, nil
}
func (*k8sPlugin) GetPluginInfo(*spi.GetPluginInfoRequest) (*spi.GetPluginInfoResponse, error) {
return &spi.GetPluginInfoResponse{}, nil
}
func New() *k8sPlugin {
return &k8sPlugin{
mtx: &sync.RWMutex{},
httpClient: &http.Client{},
fs: osFS{},
}
}
| 1 | 9,372 | i'm not confident these are the right defaults... anybody have input? | spiffe-spire | go |
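The open question above about defaults presumably concerns timing-related settings, since the only functional change visible in the patch hunk is the added `time` import. A heavily hedged sketch of what configurable defaults of that kind could look like; the option names and values below are assumptions for illustration, not the plugin's real configuration:

    // Hypothetical sketch only: names and values are illustrative defaults for a
    // poll/retry style configuration, not the actual plugin options.
    package k8s

    import "time"

    const (
    	defaultMaxPollAttempts   = 5
    	defaultPollRetryInterval = 300 * time.Millisecond
    )

    type pollConfig struct {
    	MaxPollAttempts   int
    	PollRetryInterval time.Duration
    }

    // withDefaults returns the config with zero values replaced by the defaults above.
    func (c pollConfig) withDefaults() pollConfig {
    	if c.MaxPollAttempts <= 0 {
    		c.MaxPollAttempts = defaultMaxPollAttempts
    	}
    	if c.PollRetryInterval <= 0 {
    		c.PollRetryInterval = defaultPollRetryInterval
    	}
    	return c
    }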
@@ -80,7 +80,7 @@ type CasPool struct {
Namespace string
// DiskList is the list of disks over which a storagepool will be provisioned
- DiskList []string
+ DiskList []DiskGroup
// PoolType is the type of pool to be provisioned e.g. striped or mirrored
PoolType string | 1 | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CasPoolKey is the key for the CasPool.
type CasPoolKey string
// CasPoolValString represents the string value for a CasPoolKey.
type CasPoolValString string
// CasPoolValInt represents the integer value for a CasPoolKey
type CasPoolValInt int
const (
// HostNameCPK is the kubernetes host name label
HostNameCPK CasPoolKey = "kubernetes.io/hostname"
// StoragePoolClaimCPK is the storage pool claim label
StoragePoolClaimCPK CasPoolKey = "openebs.io/storage-pool-claim"
// NdmDiskTypeCPK is the node-disk-manager disk type e.g. 'sparse' or 'disk'
NdmDiskTypeCPK CasPoolKey = "ndm.io/disk-type"
// PoolTypeMirroredCPV is a key for mirrored for pool
PoolTypeMirroredCPV CasPoolValString = "mirrored"
// PoolTypeStripedCPV is a key for striped for pool
PoolTypeStripedCPV CasPoolValString = "striped"
// PoolTypeRaidzCPV is a key for raidz for pool
PoolTypeRaidzCPV CasPoolValString = "raidz"
// PoolTypeRaidz2CPV is a key for raidz for pool
PoolTypeRaidz2CPV CasPoolValString = "raidz2"
// TypeSparseCPV is a key for sparse disk pool
TypeSparseCPV CasPoolValString = "sparse"
// TypeDiskCPV is a key for physical,iscsi,virtual etc disk pool
TypeDiskCPV CasPoolValString = "disk"
// StripedDiskCountCPV is the count for striped type pool
StripedDiskCountCPV CasPoolValInt = 1
// MirroredDiskCountCPV is the count for mirrored type pool
MirroredDiskCountCPV CasPoolValInt = 2
// RaidzDiskCountCPV is the count for raidz type pool
RaidzDiskCountCPV CasPoolValInt = 3
// Raidz2DiskCountCPV is the count for raidz2 type pool
Raidz2DiskCountCPV CasPoolValInt = 6
)
// CasPool is a type which will be utilised by CAS engine to perform
// storagepool related operation.
// TODO: Restructure CasPool struct.
type CasPool struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// StoragePoolClaim is the name of the storagepoolclaim object
StoragePoolClaim string
// CasCreateTemplate is the cas template that will be used for storagepool create
// operation
CasCreateTemplate string
// CasDeleteTemplate is the cas template that will be used for storagepool delete
// operation
CasDeleteTemplate string
// Namespace can be passed via storagepoolclaim as labels to decide on the
// execution of namespaced resources with respect to storagepool
Namespace string
// DiskList is the list of disks over which a storagepool will be provisioned
DiskList []string
// PoolType is the type of pool to be provisioned e.g. striped or mirrored
PoolType string
	// MaxPools is the maximum number of pools that should be provisioned
	MaxPools int
	// MinPools is the minimum number of pools that should be provisioned
	MinPools int
// Type is the CasPool type e.g. sparse or openebs-cstor
Type string
// NodeName is the node where cstor pool will be created
NodeName string
	// ReSync decides whether the event is a reconciliation event
ReSync bool
// PendingPoolCount is the number of pools that will be tried for creation as a part of reconciliation.
PendingPoolCount int
DeviceID []string
Disks DiskList
}
| 1 | 13,890 | DiskGroupList can be better name | openebs-maya | go |
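The comment above is a naming suggestion: with `DiskList` becoming a slice of `DiskGroup`, a named slice type such as `DiskGroupList` would read better in the `CasPool` struct. A minimal sketch of that suggestion; the fields of `DiskGroup` shown here are assumed for illustration, since its real definition lives elsewhere in the patch:

    // Hypothetical sketch of the reviewer's naming suggestion.
    package v1alpha1

    // DiskGroup is assumed here to describe one group of disks backing a pool;
    // its actual fields are defined elsewhere in the patch.
    type DiskGroup struct {
    	Name  string
    	Disks []string
    }

    // DiskGroupList is the suggested named type, so CasPool could declare
    // `DiskList DiskGroupList` instead of `DiskList []DiskGroup`.
    type DiskGroupList []DiskGroup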
@@ -229,7 +229,7 @@ func customErrorHandler(ctx context.Context, mux *runtime.ServeMux, m runtime.Ma
runtime.DefaultHTTPErrorHandler(ctx, mux, m, w, req, err)
}
-func New(unaryInterceptors []grpc.UnaryServerInterceptor, assets http.FileSystem, gatewayCfg *gatewayv1.GatewayOptions) (*Mux, error) {
+func New(unaryInterceptors []grpc.UnaryServerInterceptor, assets http.FileSystem, gatewayCfg *gatewayv1.GatewayOptions, metricsHandler http.Handler) (*Mux, error) {
secureCookies := true
if gatewayCfg.SecureCookies != nil {
secureCookies = gatewayCfg.SecureCookies.Value | 1 | package mux
import (
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/http/pprof"
"net/textproto"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
gatewayv1 "github.com/lyft/clutch/backend/api/config/gateway/v1"
"github.com/lyft/clutch/backend/service"
awsservice "github.com/lyft/clutch/backend/service/aws"
)
const (
xHeader = "X-"
xForwardedFor = "X-Forwarded-For"
xForwardedHost = "X-Forwarded-Host"
)
var apiPattern = regexp.MustCompile(`^/v\d+/`)
type assetHandler struct {
assetCfg *gatewayv1.Assets
next http.Handler
fileSystem http.FileSystem
fileServer http.Handler
}
func copyHTTPResponse(resp *http.Response, w http.ResponseWriter) {
for key, values := range resp.Header {
for _, val := range values {
w.Header().Add(key, val)
}
}
w.WriteHeader(resp.StatusCode)
_, _ = io.Copy(w, resp.Body)
}
func (a *assetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if apiPattern.MatchString(r.URL.Path) || r.URL.Path == "/healthcheck" {
// Serve from the embedded API handler.
a.next.ServeHTTP(w, r)
return
}
// Check if assets are okay to serve by calling the Fetch endpoint and verifying it returns a 200.
rec := httptest.NewRecorder()
origPath := r.URL.Path
r.URL.Path = "/v1/assets/fetch"
a.next.ServeHTTP(rec, r)
if rec.Code != http.StatusOK {
copyHTTPResponse(rec.Result(), w)
return
}
// Set the original path.
r.URL.Path = origPath
// Serve!
if f, err := a.fileSystem.Open(r.URL.Path); err != nil {
// If not a known static asset and an asset provider is configured, try streaming from the configured provider.
if a.assetCfg != nil && a.assetCfg.Provider != nil && strings.HasPrefix(r.URL.Path, "/static/") {
// We attach this header simply for observability purposes.
			// Otherwise it's difficult to know if the assets are being served from the configured provider.
w.Header().Set("x-clutch-asset-passthrough", "true")
asset, err := a.assetProviderHandler(r.Context(), r.URL.Path)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
_, _ = w.Write([]byte(fmt.Sprintf("Error getting assets from the configured asset provider: %v", err)))
return
}
defer asset.Close()
_, err = io.Copy(w, asset)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
_, _ = w.Write([]byte(fmt.Sprintf("Error getting assets from the configured asset provider: %v", err)))
return
}
return
}
// If not a known static asset serve the SPA.
r.URL.Path = "/"
} else {
_ = f.Close()
}
a.fileServer.ServeHTTP(w, r)
}
func (a *assetHandler) assetProviderHandler(ctx context.Context, urlPath string) (io.ReadCloser, error) {
switch a.assetCfg.Provider.(type) {
case *gatewayv1.Assets_S3:
aws, err := getAssetProviderService(a.assetCfg)
if err != nil {
return nil, err
}
awsClient, ok := aws.(awsservice.Client)
if !ok {
return nil, fmt.Errorf("Unable to aquire the aws client")
}
return awsClient.S3StreamingGet(
ctx,
a.assetCfg.GetS3().Region,
a.assetCfg.GetS3().Bucket,
path.Join(a.assetCfg.GetS3().Key, strings.TrimPrefix(urlPath, "/static")),
)
default:
return nil, fmt.Errorf("configured asset provider has not been implemented")
}
}
// getAssetProviderService is used in two different contexts.
// It's invoked in the mux constructor, which checks whether the necessary service has been configured
// when the configured asset provider requires one.
//
// Otherwise it's used to get the service for an asset provider in assetProviderHandler() if necessary.
func getAssetProviderService(assetCfg *gatewayv1.Assets) (service.Service, error) {
switch assetCfg.Provider.(type) {
case *gatewayv1.Assets_S3:
aws, ok := service.Registry[awsservice.Name]
if !ok {
return nil, fmt.Errorf("The AWS service must be configured to use the asset s3 provider.")
}
return aws, nil
default:
		// An asset provider does not necessarily require a service to function properly,
		// so if there is nothing configured for a provider type we can't necessarily throw an error here.
return nil, nil
}
}
func newCustomResponseForwarder(secureCookies bool) func(context.Context, http.ResponseWriter, proto.Message) error {
return func(ctx context.Context, w http.ResponseWriter, resp proto.Message) error {
md, ok := runtime.ServerMetadataFromContext(ctx)
if !ok {
return nil
}
if cookies := md.HeaderMD.Get("Set-Cookie-Token"); len(cookies) > 0 {
cookie := &http.Cookie{
Name: "token",
Value: cookies[0],
Path: "/",
HttpOnly: false,
Secure: secureCookies,
}
http.SetCookie(w, cookie)
}
if cookies := md.HeaderMD.Get("Set-Cookie-Refresh-Token"); len(cookies) > 0 {
cookie := &http.Cookie{
Name: "refreshToken",
Value: cookies[0],
Path: "/v1/authn/login",
HttpOnly: true, // Client cannot access refresh token, it is sent by browser only if login is attempted.
Secure: secureCookies,
}
http.SetCookie(w, cookie)
}
// Redirect if it's the browser (non-XHR).
redirects := md.HeaderMD.Get("Location")
if len(redirects) > 0 && isBrowser(requestHeadersFromResponseWriter(w)) {
code := http.StatusFound
if st := md.HeaderMD.Get("Location-Status"); len(st) > 0 {
headerCodeOverride, err := strconv.Atoi(st[0])
if err != nil {
return err
}
code = headerCodeOverride
}
w.Header().Set("Location", redirects[0])
w.WriteHeader(code)
}
return nil
}
}
func customHeaderMatcher(key string) (string, bool) {
key = textproto.CanonicalMIMEHeaderKey(key)
if strings.HasPrefix(key, xHeader) {
// exclude handling these headers as they are looked up by grpc's annotate context flow and added to the context
// metadata if they're not found
if key != xForwardedFor && key != xForwardedHost {
return runtime.MetadataPrefix + key, true
}
}
	// use the default header mapping rule
return runtime.DefaultHeaderMatcher(key)
}
func customErrorHandler(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, req *http.Request, err error) {
if isBrowser(req.Header) { // Redirect if it's the browser (non-XHR).
if s, ok := status.FromError(err); ok && s.Code() == codes.Unauthenticated {
redirectPath := fmt.Sprintf("/v1/authn/login?redirect_url=%s", url.QueryEscape(req.RequestURI))
http.Redirect(w, req, redirectPath, http.StatusFound)
return
}
}
runtime.DefaultHTTPErrorHandler(ctx, mux, m, w, req, err)
}
func New(unaryInterceptors []grpc.UnaryServerInterceptor, assets http.FileSystem, gatewayCfg *gatewayv1.GatewayOptions) (*Mux, error) {
secureCookies := true
if gatewayCfg.SecureCookies != nil {
secureCookies = gatewayCfg.SecureCookies.Value
}
grpcServer := grpc.NewServer(grpc.ChainUnaryInterceptor(unaryInterceptors...))
jsonGateway := runtime.NewServeMux(
runtime.WithForwardResponseOption(newCustomResponseForwarder(secureCookies)),
runtime.WithErrorHandler(customErrorHandler),
runtime.WithMarshalerOption(
runtime.MIMEWildcard,
&runtime.JSONPb{
MarshalOptions: protojson.MarshalOptions{
// Use camelCase for the JSON version.
UseProtoNames: false,
// Transmit zero-values over the wire.
EmitUnpopulated: true,
},
UnmarshalOptions: protojson.UnmarshalOptions{},
},
),
runtime.WithIncomingHeaderMatcher(customHeaderMatcher),
)
// If there is a configured asset provider, we check to see if the service is configured before proceeding.
// Bailing out early during the startup process instead of hitting this error at runtime when serving assets.
if gatewayCfg.Assets != nil && gatewayCfg.Assets.Provider != nil {
_, err := getAssetProviderService(gatewayCfg.Assets)
if err != nil {
return nil, err
}
}
httpMux := http.NewServeMux()
httpMux.Handle("/", &assetHandler{
assetCfg: gatewayCfg.Assets,
next: jsonGateway,
fileSystem: assets,
fileServer: http.FileServer(assets),
})
if gatewayCfg.EnablePprof {
httpMux.HandleFunc("/debug/pprof/", pprof.Index)
}
mux := &Mux{
GRPCServer: grpcServer,
JSONGateway: jsonGateway,
HTTPMux: httpMux,
}
return mux, nil
}
// Mux allows sharing one port between gRPC and the corresponding JSON gateway via header-based multiplexing.
type Mux struct {
// Create empty handlers for gRPC and grpc-gateway (JSON) traffic.
JSONGateway *runtime.ServeMux
HTTPMux http.Handler
GRPCServer *grpc.Server
}
// Adapted from https://github.com/grpc/grpc-go/blob/197c621/server.go#L760-L778.
func (m *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
m.GRPCServer.ServeHTTP(w, r)
} else {
m.HTTPMux.ServeHTTP(w, r)
}
}
func (m *Mux) EnableGRPCReflection() {
reflection.Register(m.GRPCServer)
}
// "h2c" is the unencrypted form of HTTP/2.
func InsecureHandler(handler http.Handler) http.Handler {
return h2c.NewHandler(handler, &http2.Server{})
}
| 1 | 11,523 | nit: lets leave gateway options at the end of the func signature. | lyft-clutch | go |
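The nit above asks for the newly added `metricsHandler` parameter to be placed before `gatewayCfg`, keeping the gateway options last in the signature. A sketch of what that reordering would look like, in the same diff form as the patch hunk at the top of this entry (only the signature changes; the body is untouched):

    -func New(unaryInterceptors []grpc.UnaryServerInterceptor, assets http.FileSystem, gatewayCfg *gatewayv1.GatewayOptions, metricsHandler http.Handler) (*Mux, error) {
    +func New(unaryInterceptors []grpc.UnaryServerInterceptor, assets http.FileSystem, metricsHandler http.Handler, gatewayCfg *gatewayv1.GatewayOptions) (*Mux, error) {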
@@ -527,7 +527,7 @@ public class PDKClient {
SharedPreferences sharedPref = _context.getSharedPreferences(PDK_SHARED_PREF_FILE_KEY, Context.MODE_PRIVATE);
SharedPreferences.Editor editor = sharedPref.edit();
editor.putString(PDK_SHARED_PREF_TOKEN_KEY, accessToken);
- editor.commit();
+ editor.apply();
}
private static String restoreAccessToken() { | 1 | package com.pinterest.android.pdk;
import android.app.Activity;
import android.content.ActivityNotFoundException;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.pm.PackageInfo;
import android.graphics.Bitmap;
import android.net.Uri;
import android.os.Build;
import android.text.TextUtils;
import com.android.volley.Request;
import com.android.volley.RequestQueue;
import com.android.volley.toolbox.Volley;
import org.apache.http.message.BasicNameValuePair;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class PDKClient {
public static final String PDKCLIENT_VERSION_CODE = "1.0";
public static final String PDKCLIENT_PERMISSION_READ_PUBLIC = "read_public";
public static final String PDKCLIENT_PERMISSION_WRITE_PUBLIC = "write_public";
public static final String PDKCLIENT_PERMISSION_READ_PRIVATE = "read_private";
public static final String PDKCLIENT_PERMISSION_WRITE_PRIVATE = "write_private";
public static final String PDKCLIENT_PERMISSION_READ_RELATIONSHIPS = "read_relationships";
public static final String PDKCLIENT_PERMISSION_WRITE_RELATIONSHIPS = "write_relationships";
public static final String PDK_QUERY_PARAM_FIELDS = "fields";
public static final String PDK_QUERY_PARAM_CURSOR = "cursor";
private static final String PDKCLIENT_EXTRA_APPID = "PDKCLIENT_EXTRA_APPID";
private static final String PDKCLIENT_EXTRA_APPNAME = "PDKCLIENT_EXTRA_APPNAME";
private static final String PDKCLIENT_EXTRA_PERMISSIONS = "PDKCLIENT_EXTRA_PERMISSIONS";
private static final String PDKCLIENT_EXTRA_RESULT = "PDKCLIENT_EXTRA_RESULT";
private static final String PDK_SHARED_PREF_FILE_KEY = "com.pinterest.android.pdk.PREF_FILE_KEY";
private static final String PDK_SHARED_PREF_TOKEN_KEY = "PDK_SHARED_PREF_TOKEN_KEY";
private static final String PDK_SHARED_PREF_SCOPES_KEY = "PDK_SHARED_PREF_SCOPES_KEY";
private static final int PDKCLIENT_REQUEST_CODE = 8772;
private static final String VOLLEY_TAG = "volley_tag";
private static final String PROD_BASE_API_URL = "https://api.pinterest.com/v1/";
private static final String PROD_WEB_OAUTH_URL = "https://api.pinterest.com/oauth/";
private static final String ME = "me/";
private static final String USER = "users/";
private static final String PINS = "pins/";
private static final String BOARDS = "boards/";
private static final String LIKES = "likes/";
private static final String FOLLOWERS = "followers/";
private static final String FOLLOWING = "following/";
private static final String INTERESTS = "interests/";
private static boolean _debugMode;
private static String _clientId;
private static Context _context;
private static String _accessToken;
private static Set<String> _scopes;
private static Set<String> _requestedScopes;
private static PDKClient _mInstance = null;
private PDKCallback _authCallback;
private static RequestQueue _requestQueue;
private static boolean _isConfigured;
private static boolean _isAuthenticated = false;
private static final String PINTEREST_PACKAGE = "com.pinterest";
private static final String PINTEREST_OAUTH_ACTIVITY = "com.pinterest.sdk.PinterestOauthActivity";
private PDKClient() {
}
public static PDKClient getInstance() {
if (_mInstance == null)
{
_mInstance = new PDKClient();
_requestQueue = getRequestQueue();
}
return _mInstance;
}
public static PDKClient configureInstance(Context context, String clientId) {
PDKClient._clientId = clientId;
PDKClient._context = context.getApplicationContext();
_isConfigured = true;
_accessToken = restoreAccessToken();
_scopes = restoreScopes();
_isAuthenticated = _accessToken != null;
return PDKClient.getInstance();
}
// ================================================================================
// Getters/Setters
// ================================================================================
/**
* Get state of debug mode
*
* @return true if enabled, false if disabled
*/
public static boolean isDebugMode() {
return _debugMode;
}
/**
* Enable/disable debug mode which will print logs when there are issues.
*
* @param debugMode true to enabled, false to disable
*/
public static void setDebugMode(boolean debugMode) {
PDKClient._debugMode = debugMode;
}
// ================================================================================
// API Interface
// ================================================================================
public void logout() {
_accessToken = null;
_scopes = null;
cancelPendingRequests();
saveAccessToken(null);
saveScopes(null);
}
public void login (final Context context, final List<String> permissions, final PDKCallback callback) {
_authCallback = callback;
if (Utils.isEmpty(permissions)) {
if (callback != null) callback.onFailure(new PDKException("Scopes cannot be empty"));
return;
}
if (!(context instanceof Activity)) {
if (callback != null) callback.onFailure(new PDKException("Please pass Activity context with login request"));
return;
}
_requestedScopes = new HashSet<String>();
_requestedScopes.addAll(permissions);
if (!Utils.isEmpty(_accessToken) && !Utils.isEmpty(_scopes)) {
getPath("oauth/inspect", null, new PDKCallback() {
@Override
public void onSuccess(PDKResponse response) {
if (verifyAccessToken(response.getData())) {
_isAuthenticated = true;
PDKClient.getInstance().getMe(_authCallback);
} else {
initiateLogin(context, permissions);
}
}
@Override
public void onFailure(PDKException exception) {
initiateLogin(context, permissions);
}
});
} else {
initiateLogin(context, permissions);
}
}
public void onOauthResponse(int requestCode, int resultCode, Intent data) {
if (requestCode == PDKCLIENT_REQUEST_CODE) {
if (resultCode == Activity.RESULT_OK) {
Utils.log("PDK: result - %s", data.getStringExtra(PDKCLIENT_EXTRA_RESULT));
onOauthResponse(data.getStringExtra(PDKCLIENT_EXTRA_RESULT));
} else {
Utils.log("PDK: Authentication failed");
_authCallback.onFailure(new PDKException("Authentication failed"));
}
}
}
public void onConnect(Context context) {
if (!(context instanceof Activity)) {
if (_authCallback != null) _authCallback.onFailure(new PDKException("Please pass Activity context with onConnect request"));
return;
}
Activity activity = (Activity) context;
if (Intent.ACTION_VIEW.equals(activity.getIntent().getAction())) {
Uri uri = activity.getIntent().getData();
if (uri != null && uri.toString().contains("pdk" + _clientId + "://"))
onOauthResponse(uri.toString());
}
}
public void getPath(String path, PDKCallback callback) {
getPath(path, null, callback);
}
public void getPath(String path, HashMap<String, String> params, PDKCallback callback) {
if (Utils.isEmpty(path)) {
if (callback != null) callback.onFailure(new PDKException("Invalid path"));
return;
}
String url = PROD_BASE_API_URL + path;
if (params == null) params = new HashMap<String, String>();
if (callback != null) callback.setPath(path);
if (callback != null) callback.setParams(params);
getRequest(url, params, callback);
}
public void postPath(String path, HashMap<String, String> params, PDKCallback callback) {
if (Utils.isEmpty(path)) {
if (callback != null) callback.onFailure(new PDKException("Invalid path"));
return;
}
if (callback != null) callback.setPath(path);
String url = PROD_BASE_API_URL + path;
postRequest(url, params, callback);
}
public void deletePath(String path, PDKCallback callback) {
if (Utils.isEmpty(path)) {
if (callback != null) callback.onFailure(new PDKException("Invalid path"));
return;
}
if (callback != null) callback.setPath(path);
String url = PROD_BASE_API_URL + path;
deleteRequest(url, null, callback);
}
public void putPath(String path, HashMap<String, String> params, PDKCallback callback) {
if (Utils.isEmpty(path)) {
if (callback != null) callback.onFailure(new PDKException("Invalid path"));
return;
}
if (callback != null) callback.setPath(path);
String url = PROD_BASE_API_URL + path;
putRequest(url, params, callback);
}
//Authorized user Endpoints
public void getMe(PDKCallback callback) {
getPath(ME, callback);
}
public void getMe(String fields, PDKCallback callback) {
getPath(ME, getMapWithFields(fields), callback);
}
public void getMyPins(String fields, PDKCallback callback) {
String path = ME + PINS;
getPath(path, getMapWithFields(fields), callback);
}
public void getMyBoards(String fields, PDKCallback callback) {
String path = ME + BOARDS;
getPath(path, getMapWithFields(fields), callback);
}
public void getMyLikes(String fields, PDKCallback callback) {
String path = ME + LIKES;
getPath(path, getMapWithFields(fields), callback);
}
public void getMyFollowers(String fields, PDKCallback callback) {
String path = ME + FOLLOWERS;
getPath(path, getMapWithFields(fields), callback);
}
public void getMyFollowedUsers(String fields, PDKCallback callback) {
String path = ME + FOLLOWING + USER;
getPath(path, getMapWithFields(fields), callback);
}
public void getMyFollowedBoards(String fields, PDKCallback callback) {
String path = ME + FOLLOWING + BOARDS;
getPath(path, getMapWithFields(fields), callback);
}
public void getMyFollowedInterests(String fields, PDKCallback callback) {
String path = ME + FOLLOWING + INTERESTS;
getPath(path, getMapWithFields(fields), callback);
}
//User Endpoint
public void getUser(String userId, String fields, PDKCallback callback) {
if (Utils.isEmpty(userId)) {
if (callback != null) callback.onFailure(new PDKException("Invalid user name/Id"));
return;
}
String path = USER + userId;
getPath(path, getMapWithFields(fields), callback);
}
//Board Endpoints
public void getBoard(String boardId, String fields, PDKCallback callback) {
if (Utils.isEmpty(boardId)) {
if (callback != null) callback.onFailure(new PDKException("Invalid board Id"));
return;
}
String path = BOARDS + boardId;
getPath(path, getMapWithFields(fields), callback);
}
public void getBoardPins(String boardId, String fields, PDKCallback callback) {
if (Utils.isEmpty(boardId)) {
if (callback != null) callback.onFailure(new PDKException("Invalid board Id"));
return;
}
String path = BOARDS + boardId + "/" + PINS;
getPath(path, getMapWithFields(fields), callback);
}
public void deleteBoard(String boardId, PDKCallback callback) {
if (Utils.isEmpty(boardId)) {
if (callback != null) callback.onFailure(new PDKException("Board Id cannot be empty"));
}
String path = BOARDS + boardId + "/";
deletePath(path, callback);
}
public void createBoard(String name, String desc, PDKCallback callback) {
if (Utils.isEmpty(name)) {
if (callback != null) callback.onFailure(new PDKException("Board name cannot be empty"));
return;
}
HashMap<String, String> params = new HashMap<String, String>();
params.put("name", name);
        if (!Utils.isEmpty(desc)) params.put("description", desc);
postPath(BOARDS, params, callback);
}
//Pin Endpoints
public void getPin(String pinId, String fields, PDKCallback callback) {
if (Utils.isEmpty(pinId)) {
if (callback != null) callback.onFailure(new PDKException("Invalid pin Id"));
return;
}
String path = PINS + pinId;
getPath(path, getMapWithFields(fields), callback);
}
/*
public void createPin(String note, String boardId, String imageUrl, String link, PDKCallback callback) {
if (Utils.isEmpty(note) || Utils.isEmpty(boardId) || Utils.isEmpty(imageUrl)) {
if (callback != null) callback.onFailure(new PDKException("Board Id, note, Image cannot be empty"));
return;
}
HashMap<String, String> params = new HashMap<String, String>();
params.put("board", boardId);
params.put("note", note);
if (!Utils.isEmpty(link)) params.put("link", link);
if (!Utils.isEmpty(link)) params.put("image_url", imageUrl);
postPath(PINS, params, callback);
}*/
public void createPin(String note, String boardId, Bitmap image, String link, PDKCallback callback) {
if (Utils.isEmpty(note) || Utils.isEmpty(boardId) || image == null) {
if (callback != null) callback.onFailure(new PDKException("Board Id, note, Image cannot be empty"));
return;
}
HashMap<String, String> params = new HashMap<String, String>();
params.put("board", boardId);
params.put("note", note);
params.put("image_base64", Utils.base64String(image));
if (!Utils.isEmpty(link)) params.put("link", link);
postPath(PINS, params, callback);
}
public void deletePin(String pinId, PDKCallback callback) {
if (Utils.isEmpty(pinId)) {
if (callback != null) callback.onFailure(new PDKException("Pin Id cannot be empty"));
}
String path = PINS + pinId + "/";
deletePath(path, callback);
}
// ================================================================================
// Internal
// ================================================================================
private void onOauthResponse(String result) {
if (!Utils.isEmpty(result)) {
Uri uri = Uri.parse(result);
if (uri.getQueryParameter("access_token") != null) {
String token = uri.getQueryParameter("access_token");
try {
token = java.net.URLDecoder.decode(token, "UTF-8");
} catch (UnsupportedEncodingException e) {
Utils.loge(e.getLocalizedMessage());
}
_accessToken = token;
_isAuthenticated = true;
PDKClient.getInstance().getMe(_authCallback);
saveAccessToken(_accessToken);
}
if (uri.getQueryParameter("error") != null) {
String error = uri.getQueryParameter("error");
Utils.loge("PDK: authentication error: %s", error);
}
}
if (_accessToken == null)
_authCallback.onFailure(new PDKException("PDK: authentication failed"));
}
private void initiateLogin(Context c, List<String> permissions) {
if (pinterestInstalled(_context)) {
Intent intent = createAuthIntent(_context, _clientId, permissions);
if (intent != null) {
openPinterestAppForLogin(c, intent);
} else {
initiateWebLogin(c, permissions);
}
} else {
initiateWebLogin(c, permissions);
}
}
private void initiateWebLogin(Context c, List<String> permissions) {
try {
List paramList = new LinkedList<BasicNameValuePair>();
paramList.add(new BasicNameValuePair("client_id", _clientId));
paramList.add(new BasicNameValuePair("scope", TextUtils.join(",", permissions)));
paramList.add(new BasicNameValuePair("redirect_uri", "pdk" + _clientId + "://"));
paramList.add(new BasicNameValuePair("response_type", "token"));
String url = Utils.getUrlWithQueryParams(PROD_WEB_OAUTH_URL, paramList);
Intent oauthIntent = new Intent(Intent.ACTION_VIEW, Uri.parse(url));
c.startActivity(oauthIntent);
} catch (Exception e) {
Utils.loge("PDK: Error initiating web oauth");
}
}
private void openPinterestAppForLogin(Context c, Intent intent) {
try {
//Utils.log("PDK: starting Pinterest app for auth");
((Activity)c).startActivityForResult(intent, PDKCLIENT_REQUEST_CODE);
} catch (ActivityNotFoundException e) {
// Ideally this should not happen because intent is not null
// initiate web login??
Utils.loge("PDK: failed to open Pinterest App for login");
return;
}
return;
}
private Intent createAuthIntent(Context context, String appId, List<String> permissions) {
return new Intent()
.setClassName(PINTEREST_PACKAGE, PINTEREST_OAUTH_ACTIVITY)
.putExtra(PDKCLIENT_EXTRA_APPID, appId)
.putExtra(PDKCLIENT_EXTRA_APPNAME, "appName")
.putExtra(PDKCLIENT_EXTRA_PERMISSIONS, TextUtils.join(",", permissions));
}
// //validate Pinterest Activity and/or package integrity
// private static Intent validateActivityIntent(Context context, Intent intent) {
// if (intent == null) {
// return null;
// }
//
// ResolveInfo resolveInfo = context.getPackageManager().resolveActivity(intent, 0);
// if (resolveInfo == null) {
// return null;
// }
//
// //validate pinterest app?
// // if (!appInfo.validateSignature(context, resolveInfo.activityInfo.packageName)) {
// // return null;
// // }
//
// return intent;
// }
/**
* Check if the device meets the requirements needed to pin using this library.
*
* @return true for supported, false otherwise
*/
private static boolean meetsRequirements() {
return Build.VERSION.SDK_INT >= 8;
}
/**
* Check if the device has Pinterest installed that supports PinIt Button
*
* @param context Application or Activity context
* @return true if requirements are met, false otherwise
*/
private static boolean pinterestInstalled(final Context context) {
if (!meetsRequirements())
return false;
boolean installed = false;
try {
PackageInfo info = context.getPackageManager().getPackageInfo(PINTEREST_PACKAGE, 0);
if (info != null) {
installed = info.versionCode >= 16;
//Utils.log("PDK versionCode:%s versionName:%s", info.versionCode,
// info.versionName);
}
if (!installed)
Utils.log("PDK: Pinterest App not installed or version too low!");
} catch (Exception e) {
Utils.loge(e.getLocalizedMessage());
installed = false;
}
return installed;
}
private void saveAccessToken(String accessToken) {
SharedPreferences sharedPref = _context.getSharedPreferences(PDK_SHARED_PREF_FILE_KEY, Context.MODE_PRIVATE);
SharedPreferences.Editor editor = sharedPref.edit();
editor.putString(PDK_SHARED_PREF_TOKEN_KEY, accessToken);
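// Note: commit() persists the preference synchronously on the calling thread;
// SharedPreferences.Editor.apply() is the asynchronous alternative if blocking here is a concern.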
editor.commit();
}
private static String restoreAccessToken() {
SharedPreferences sharedPref = _context.getSharedPreferences(PDK_SHARED_PREF_FILE_KEY, Context.MODE_PRIVATE);
return sharedPref.getString(PDK_SHARED_PREF_TOKEN_KEY, null);
}
private void saveScopes(Set<String> perms) {
SharedPreferences sharedPref = _context.getSharedPreferences(PDK_SHARED_PREF_FILE_KEY, Context.MODE_PRIVATE);
SharedPreferences.Editor editor = sharedPref.edit();
editor.putStringSet(PDK_SHARED_PREF_SCOPES_KEY, perms);
editor.commit();
}
private static Set<String> restoreScopes() {
SharedPreferences sharedPref = _context.getSharedPreferences(PDK_SHARED_PREF_FILE_KEY, Context.MODE_PRIVATE);
return sharedPref.getStringSet(PDK_SHARED_PREF_SCOPES_KEY, new HashSet<String>());
}
private static RequestQueue getRequestQueue() {
if (_requestQueue == null) {
_requestQueue = Volley.newRequestQueue(_context);
}
return _requestQueue;
}
private static <T> void addToRequestQueue(Request<T> req) {
req.setTag(VOLLEY_TAG);
getRequestQueue().add(req);
}
private static void cancelPendingRequests() {
_requestQueue.cancelAll(VOLLEY_TAG);
}
private static boolean validateScopes(Set<String> requestedScopes) {
return _scopes.equals(requestedScopes);
}
private HashMap<String, String> getMapWithFields(String fields) {
HashMap<String, String> map = new HashMap<>();
map.put(PDK_QUERY_PARAM_FIELDS, fields);
return map;
}
private static Map<String, String> getHeaders() {
Map<String, String> headers = new HashMap<String, String>();
headers.put("User-Agent", String.format("PDK %s", PDKCLIENT_VERSION_CODE));
return headers;
}
private boolean verifyAccessToken(Object obj) {
boolean verified = false;
String appId = "";
Set<String> appScopes = new HashSet<String>();
try {
JSONObject jsonObject = (JSONObject)obj;
if (jsonObject.has("app")) {
JSONObject appObj = jsonObject.getJSONObject("app");
if (appObj.has("id")) {
appId = appObj.getString("id");
}
}
if (jsonObject.has("scopes")) {
JSONArray scopesArray = jsonObject.getJSONArray("scopes");
int size = scopesArray.length();
for (int i = 0; i < size; i++) {
appScopes.add(scopesArray.get(i).toString());
}
}
} catch (JSONException exception) {
Utils.loge("PDK: %s", exception.getLocalizedMessage());
}
if (!Utils.isEmpty(appScopes)) {
saveScopes(appScopes);
}
if (!Utils.isEmpty(appId) && !Utils.isEmpty(appScopes)) {
if (appId.equalsIgnoreCase(_clientId) && appScopes.equals(_requestedScopes)) {
verified = true;
}
}
return verified;
}
private static Request getRequest(String url, HashMap<String, String> params, PDKCallback callback) {
Utils.log("PDK GET: %s", url);
List paramList = new LinkedList<>();
paramList.add(new BasicNameValuePair("access_token", _accessToken));
if (!Utils.isEmpty(params)) {
for (HashMap.Entry<String, String> e : params.entrySet()) {
paramList.add(new BasicNameValuePair(e.getKey(), e.getValue()));
}
}
url = Utils.getUrlWithQueryParams(url, paramList);
if (callback == null) callback = new PDKCallback();
PDKRequest request = new PDKRequest(Request.Method.GET, url, null, callback, getHeaders());
addToRequestQueue(request);
return request;
}
private static Request postRequest(String url, HashMap<String, String> params, PDKCallback callback) {
Utils.log(String.format("PDK POST: %s", url));
if (params == null) params = new HashMap<String, String>();
List queryParams = new LinkedList<>();
queryParams.add(new BasicNameValuePair("access_token", _accessToken));
url = Utils.getUrlWithQueryParams(url, queryParams);
if (callback == null) callback = new PDKCallback();
PDKRequest request = new PDKRequest(Request.Method.POST, url, new JSONObject(params), callback, getHeaders());
addToRequestQueue(request);
return request;
}
private static Request deleteRequest(String url, HashMap<String, String> params, PDKCallback callback) {
Utils.log(String.format("PDK DELETE: %s", url));
List queryParams = new LinkedList<>();
queryParams.add(new BasicNameValuePair("access_token", _accessToken));
url = Utils.getUrlWithQueryParams(url, queryParams);
if (callback == null) callback = new PDKCallback();
PDKRequest request = new PDKRequest(Request.Method.DELETE, url, null, callback, getHeaders());
request.setShouldCache(false);
addToRequestQueue(request);
return request;
}
private static Request putRequest(String url, HashMap<String, String> params, PDKCallback callback) {
Utils.log(String.format("PDK PUT: %s", url));
if (params == null) params = new HashMap<String, String>();
List queryParams = new LinkedList<>();
queryParams.add(new BasicNameValuePair("access_token", _accessToken));
url = Utils.getUrlWithQueryParams(url, queryParams);
if (callback == null) callback = new PDKCallback();
PDKRequest request = new PDKRequest(Request.Method.PUT, url, new JSONObject(params), callback, getHeaders());
addToRequestQueue(request);
return request;
}
}
| 1 | 12,310 | It will be better to use `commit()` on a separate thread, apart from the UI thread. The reason is `commit()` is synchronous while `apply()` is asynchronous. So in case it might not perform actions immediately as expected. | fossasia-phimpme-android | java |
@@ -123,10 +123,10 @@ func (q *queryImpl) validateTerminationState(
queryResult := terminationState.queryResult
validAnswered := queryResult.GetResultType().Equals(shared.QueryResultTypeAnswered) &&
queryResult.Answer != nil &&
- queryResult.ErrorMessage == nil
+ (queryResult.ErrorMessage == nil || *queryResult.ErrorMessage == "")
validFailed := queryResult.GetResultType().Equals(shared.QueryResultTypeFailed) &&
queryResult.Answer == nil &&
- queryResult.ErrorMessage != nil
+ (queryResult.ErrorMessage != nil && *queryResult.ErrorMessage != "")
if !validAnswered && !validFailed {
return errTerminationStateInvalid
} | 1 | // The MIT License (MIT)
//
// Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package history
import (
"sync/atomic"
"github.com/pborman/uuid"
"github.com/temporalio/temporal/.gen/go/shared"
)
const (
queryTerminationTypeCompleted queryTerminationType = iota
queryTerminationTypeUnblocked
queryTerminationTypeFailed
)
var (
errTerminationStateInvalid = &shared.InternalServiceError{Message: "query termination state invalid"}
errAlreadyInTerminalState = &shared.InternalServiceError{Message: "query already in terminal state"}
errQueryNotInTerminalState = &shared.InternalServiceError{Message: "query not in terminal state"}
)
type (
queryTerminationType int
query interface {
getQueryID() string
getQueryTermCh() <-chan struct{}
getQueryInput() *shared.WorkflowQuery
getTerminationState() (*queryTerminationState, error)
setTerminationState(*queryTerminationState) error
}
queryImpl struct {
id string
queryInput *shared.WorkflowQuery
termCh chan struct{}
terminationState atomic.Value
}
queryTerminationState struct {
queryTerminationType queryTerminationType
queryResult *shared.WorkflowQueryResult
failure error
}
)
func newQuery(queryInput *shared.WorkflowQuery) query {
return &queryImpl{
id: uuid.New(),
queryInput: queryInput,
termCh: make(chan struct{}),
}
}
func (q *queryImpl) getQueryID() string {
return q.id
}
func (q *queryImpl) getQueryTermCh() <-chan struct{} {
return q.termCh
}
func (q *queryImpl) getQueryInput() *shared.WorkflowQuery {
return q.queryInput
}
func (q *queryImpl) getTerminationState() (*queryTerminationState, error) {
ts := q.terminationState.Load()
if ts == nil {
return nil, errQueryNotInTerminalState
}
return ts.(*queryTerminationState), nil
}
func (q *queryImpl) setTerminationState(terminationState *queryTerminationState) error {
if err := q.validateTerminationState(terminationState); err != nil {
return err
}
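// a query may transition into a terminal state at most once; any later attempt is rejected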
currTerminationState, _ := q.getTerminationState()
if currTerminationState != nil {
return errAlreadyInTerminalState
}
q.terminationState.Store(terminationState)
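// closing termCh signals readers of getQueryTermCh() that a terminal state is now available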
close(q.termCh)
return nil
}
func (q *queryImpl) validateTerminationState(
terminationState *queryTerminationState,
) error {
if terminationState == nil {
return errTerminationStateInvalid
}
switch terminationState.queryTerminationType {
case queryTerminationTypeCompleted:
if terminationState.queryResult == nil || terminationState.failure != nil {
return errTerminationStateInvalid
}
queryResult := terminationState.queryResult
validAnswered := queryResult.GetResultType().Equals(shared.QueryResultTypeAnswered) &&
queryResult.Answer != nil &&
queryResult.ErrorMessage == nil
validFailed := queryResult.GetResultType().Equals(shared.QueryResultTypeFailed) &&
queryResult.Answer == nil &&
queryResult.ErrorMessage != nil
if !validAnswered && !validFailed {
return errTerminationStateInvalid
}
return nil
case queryTerminationTypeUnblocked:
if terminationState.queryResult != nil || terminationState.failure != nil {
return errTerminationStateInvalid
}
return nil
case queryTerminationTypeFailed:
if terminationState.queryResult != nil || terminationState.failure == nil {
return errTerminationStateInvalid
}
return nil
default:
return errTerminationStateInvalid
}
}
| 1 | 9,138 | Took me almost 4 hours to find this. | temporalio-temporal | go |
@@ -8,9 +8,9 @@ from .concat_dataset import ConcatDataset
from .repeat_dataset import RepeatDataset
from .extra_aug import ExtraAugmentation
+
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset', 'GroupSampler',
'DistributedGroupSampler', 'build_dataloader', 'to_tensor', 'random_scale',
'show_ann', 'get_dataset', 'ConcatDataset', 'RepeatDataset',
- 'ExtraAugmentation'
-]
+ 'ExtraAugmentation'] | 1 | from .custom import CustomDataset
from .xml_style import XMLDataset
from .coco import CocoDataset
from .voc import VOCDataset
from .loader import GroupSampler, DistributedGroupSampler, build_dataloader
from .utils import to_tensor, random_scale, show_ann, get_dataset
from .concat_dataset import ConcatDataset
from .repeat_dataset import RepeatDataset
from .extra_aug import ExtraAugmentation
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset', 'GroupSampler',
'DistributedGroupSampler', 'build_dataloader', 'to_tensor', 'random_scale',
'show_ann', 'get_dataset', 'ConcatDataset', 'RepeatDataset',
'ExtraAugmentation'
]
| 1 | 17,412 | There should be only a single blank line between imports and `__all__`. | open-mmlab-mmdetection | py |
@@ -194,7 +194,7 @@ public class TestCloudPseudoReturnFields extends SolrCloudTestCase {
SolrDocumentList docs = assertSearch(params("q", "*:*", "rows", "10", "fl",fl));
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
- assertEquals(fl + " => " + doc, 4, doc.size());
+ assertEquals(fl + " => " + doc, 5, doc.size());
assertTrue(fl + " => " + doc, doc.getFieldValue("id") instanceof String);
assertTrue(fl + " => " + doc, doc.getFieldValue("val_i") instanceof Integer);
assertTrue(fl + " => " + doc, doc.getFieldValue("subject") instanceof String); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud;
import java.lang.invoke.MethodHandles;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import org.apache.commons.lang.StringUtils;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.schema.SchemaRequest.Field;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.schema.SchemaResponse.FieldResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.search.TestPseudoReturnFields;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
/**
* @see TestPseudoReturnFields
* @see TestRandomFlRTGCloud
*/
public class TestCloudPseudoReturnFields extends SolrCloudTestCase {
private static final String DEBUG_LABEL = MethodHandles.lookup().lookupClass().getName();
private static final String COLLECTION_NAME = DEBUG_LABEL + "_collection";
/** A basic client for operations at the cloud level, default collection will be set */
private static CloudSolrClient CLOUD_CLIENT;
/** One client per node */
private static ArrayList<HttpSolrClient> CLIENTS = new ArrayList<>(5);
@BeforeClass
private static void createMiniSolrCloudCluster() throws Exception {
// multi replicas should matter...
final int repFactor = usually() ? 1 : 2;
// ... but we definitely want to ensure forwarded requests to other shards work ...
final int numShards = 2;
// ... including some forwarded requests from nodes not hosting a shard
final int numNodes = 1 + (numShards * repFactor);
final String configName = DEBUG_LABEL + "_config-set";
final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");
configureCluster(numNodes).addConfig(configName, configDir).configure();
Map<String, String> collectionProperties = new HashMap<>();
collectionProperties.put("config", "solrconfig-tlog.xml");
collectionProperties.put("schema", "schema-psuedo-fields.xml");
CollectionAdminRequest.createCollection(COLLECTION_NAME, configName, numShards, repFactor)
.setProperties(collectionProperties)
.process(cluster.getSolrClient());
CLOUD_CLIENT = cluster.getSolrClient();
CLOUD_CLIENT.setDefaultCollection(COLLECTION_NAME);
waitForRecoveriesToFinish(CLOUD_CLIENT);
for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
CLIENTS.add(getHttpSolrClient(jetty.getBaseUrl() + "/" + COLLECTION_NAME + "/"));
}
assertEquals(0, CLOUD_CLIENT.add(sdoc("id", "42", "val_i", "1", "ssto", "X", "subject", "aaa")).getStatus());
assertEquals(0, CLOUD_CLIENT.add(sdoc("id", "43", "val_i", "9", "ssto", "X", "subject", "bbb")).getStatus());
assertEquals(0, CLOUD_CLIENT.add(sdoc("id", "44", "val_i", "4", "ssto", "X", "subject", "aaa")).getStatus());
assertEquals(0, CLOUD_CLIENT.add(sdoc("id", "45", "val_i", "6", "ssto", "X", "subject", "aaa")).getStatus());
assertEquals(0, CLOUD_CLIENT.add(sdoc("id", "46", "val_i", "3", "ssto", "X", "subject", "ggg")).getStatus());
assertEquals(0, CLOUD_CLIENT.commit().getStatus());
}
@Before
private void addUncommittedDoc99() throws Exception {
// uncommitted doc in transaction log at start of every test
// Even if an RTG causes ulog to re-open realtime searcher, next test method
// will get another copy of doc 99 in the ulog
assertEquals(0, CLOUD_CLIENT.add(sdoc("id", "99", "val_i", "1", "ssto", "X",
"subject", "uncommitted")).getStatus());
}
@AfterClass
private static void afterClass() throws Exception {
CLOUD_CLIENT.close(); CLOUD_CLIENT = null;
for (HttpSolrClient client : CLIENTS) {
client.close();
}
CLIENTS = null;
}
public void testMultiValued() throws Exception {
// the response writers used to consult isMultiValued on the field
// but this doesn't work when you alias a single valued field to
// a multi valued field (the field value is copied first, then
// if the type lookup is done again later, we get the wrong thing). SOLR-4036
// score as pseudo field - precondition checks
for (String name : new String[] {"score", "val_ss"}) {
try {
FieldResponse frsp = new Field(name, params("includeDynamic","true",
"showDefaults","true")).process(CLOUD_CLIENT);
assertNotNull("Test depends on a (dynamic) field matching '"+name+"', Null response", frsp);
assertEquals("Test depends on a (dynamic) field matching '"+name+"', bad status: " + frsp.toString(),
0, frsp.getStatus());
assertNotNull("Test depends on a (dynamic) field matching '"+name+
"', schema was changed out from under us? ... " + frsp.toString(), frsp.getField());
assertEquals("Test depends on a multivalued dynamic field matching '"+name+
"', schema was changed out from under us? ... " + frsp.toString(),
Boolean.TRUE, frsp.getField().get("multiValued"));
} catch (SolrServerException e) {
assertEquals("Couldn't fetch field for '"+name+"' ... schema changed out from under us?",
null, e);
}
}
SolrDocument doc = null;
// score as pseudo field
doc = assertSearchOneDoc(params("q","*:*", "fq", "id:42", "fl","id,score,val_ss,val2_ss"));
assertEquals("42", doc.getFieldValue("id"));
assertEquals(1.0F, doc.getFieldValue("score"));
assertEquals(""+doc, 2, doc.size()); // no value for val2_ss or val_ss ... yet...
// TODO: update this test & TestPseudoReturnFields to index docs using a (multivalued) "val_ss" instead of "ssto"
//
// that way we can first sanity check that a single value in a multivalued field is returned correctly
// as a "List" of one element, *AND* then we could be testing that a (single valued) pseudo-field correctly
// overrides the actual (real) value in a multivalued field (i.e. not returning a List)
//
// (NOTE: not doing this yet due to how it will impact most other tests, many of which are currently
// @AwaitsFix'ed)
//
//assertTrue(doc.getFieldValue("val_ss").getClass().toString(),
// doc.getFieldValue("val_ss") instanceof List);
// single value int using alias that matches multivalued dynamic field
doc = assertSearchOneDoc(params("q","id:42", "fl","val_ss:val_i, val2_ss:10"));
assertEquals(""+doc, 2, doc.size());
assertEquals(""+doc, 1, doc.getFieldValue("val_ss"));
assertEquals(""+doc, 10L, doc.getFieldValue("val2_ss"));
}
public void testMultiValuedRTG() throws Exception {
SolrDocument doc = null;
// check same results as testMultiValued via RTG (committed doc)
doc = getRandClient(random()).getById("42", params("fl","val_ss:val_i, val2_ss:10, subject"));
assertEquals(""+doc, 3, doc.size());
assertEquals(""+doc, 1, doc.getFieldValue("val_ss"));
assertEquals(""+doc, 10L, doc.getFieldValue("val2_ss"));
assertEquals(""+doc, "aaa", doc.getFieldValue("subject"));
// also check real-time-get from transaction log (uncommitted doc)
doc = getRandClient(random()).getById("99", params("fl","val_ss:val_i, val2_ss:10, subject"));
assertEquals(""+doc, 3, doc.size());
assertEquals(""+doc, 1, doc.getFieldValue("val_ss"));
assertEquals(""+doc, 10L, doc.getFieldValue("val2_ss"));
assertEquals(""+doc, "uncommitted", doc.getFieldValue("subject"));
}
public void testAllRealFields() throws Exception {
for (String fl : TestPseudoReturnFields.ALL_REAL_FIELDS) {
SolrDocumentList docs = assertSearch(params("q", "*:*", "rows", "10", "fl",fl));
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
assertEquals(fl + " => " + doc, 4, doc.size());
assertTrue(fl + " => " + doc, doc.getFieldValue("id") instanceof String);
assertTrue(fl + " => " + doc, doc.getFieldValue("val_i") instanceof Integer);
assertTrue(fl + " => " + doc, doc.getFieldValue("subject") instanceof String);
assertTrue(fl + " => " + doc, doc.getFieldValue("ssto") instanceof String); // TODO: val_ss: List<String>
}
}
}
public void testAllRealFieldsRTG() throws Exception {
// shouldn't matter if we use RTG (committed or otherwise)
for (String fl : TestPseudoReturnFields.ALL_REAL_FIELDS) {
for (int i : Arrays.asList(42, 43, 44, 45, 46, 99)) {
SolrDocument doc = getRandClient(random()).getById(""+i, params("fl",fl));
assertEquals(fl + " => " + doc, 4, doc.size());
assertTrue(fl + " => " + doc, doc.getFieldValue("id") instanceof String);
assertTrue(fl + " => " + doc, doc.getFieldValue("val_i") instanceof Integer);
assertTrue(fl + " => " + doc, doc.getFieldValue("subject") instanceof String);
assertTrue(fl + " => " + doc, doc.getFieldValue("ssto") instanceof String); // TODO: val_ss: List<String>
}
}
}
public void testFilterAndOneRealFieldRTG() throws Exception {
SolrParams params = params("fl","id,val_i",
"fq","{!field f='subject' v=$my_var}",
"my_var","uncommitted");
SolrDocumentList docs = getRandClient(random()).getById(Arrays.asList("42","99"), params);
final String msg = params + " => " + docs;
assertEquals(msg, 1, docs.size());
assertEquals(msg, 1, docs.getNumFound());
SolrDocument doc = docs.get(0);
assertEquals(msg, 2, doc.size());
assertEquals(msg, "99", doc.getFieldValue("id"));
assertEquals(msg, 1, doc.getFieldValue("val_i"));
}
public void testScoreAndAllRealFields() throws Exception {
for (String fl : TestPseudoReturnFields.SCORE_AND_REAL_FIELDS) {
SolrDocumentList docs = assertSearch(params("q", "*:*", "rows", "10", "fl",fl));
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
assertEquals(fl + " => " + doc, 5, doc.size());
assertTrue(fl + " => " + doc, doc.getFieldValue("id") instanceof String);
assertTrue(fl + " => " + doc, doc.getFieldValue("score") instanceof Float);
assertTrue(fl + " => " + doc, doc.getFieldValue("val_i") instanceof Integer);
assertTrue(fl + " => " + doc, doc.getFieldValue("subject") instanceof String);
assertTrue(fl + " => " + doc, doc.getFieldValue("ssto") instanceof String); // TODO: val_ss: List<String>
}
}
}
public void testScoreAndAllRealFieldsRTG() throws Exception {
// also shouldn't matter if we use RTG (committed or otherwise) .. score should be ignored
for (String fl : TestPseudoReturnFields.SCORE_AND_REAL_FIELDS) {
for (int i : Arrays.asList(42, 43, 44, 45, 46, 99)) {
SolrDocument doc = getRandClient(random()).getById(""+i, params("fl",fl));
assertEquals(fl + " => " + doc, 4, doc.size());
assertTrue(fl + " => " + doc, doc.getFieldValue("id") instanceof String);
assertTrue(fl + " => " + doc, doc.getFieldValue("val_i") instanceof Integer);
assertTrue(fl + " => " + doc, doc.getFieldValue("subject") instanceof String);
assertTrue(fl + " => " + doc, doc.getFieldValue("ssto") instanceof String); // TODO: val_ss: List<String>
}
}
}
public void testScoreAndExplicitRealFields() throws Exception {
SolrDocumentList docs = null;
SolrDocument doc = null;
for (SolrParams p : Arrays.asList(params("q","*:*", "rows", "1", "fl","score,val_i"),
params("q","*:*", "rows", "1", "fl","score", "fl","val_i"))) {
docs = assertSearch(p);
assertEquals(p + " => " + docs, 5, docs.getNumFound());
doc = docs.get(0); // doesn't really matter which one
assertEquals(p + " => " + doc, 2, doc.size());
assertTrue(p + " => " + doc, doc.getFieldValue("val_i") instanceof Integer);
assertTrue(p + " => " + doc, doc.getFieldValue("score") instanceof Float);
}
docs = assertSearch(params("q","*:*", "rows", "1", "fl","val_i"));
assertEquals("" + docs, 5, docs.getNumFound());
doc = docs.get(0); // doesn't really matter which one
assertEquals("" + doc, 1, doc.size());
assertTrue("" + doc, doc.getFieldValue("val_i") instanceof Integer);
}
public void testScoreAndExplicitRealFieldsRTG() throws Exception {
SolrDocumentList docs = null;
SolrDocument doc = null;
// shouldn't matter if we use RTG (committed or otherwise) .. score should be ignored
for (int i : Arrays.asList(42, 43, 44, 45, 46, 99)) {
for (SolrParams p : Arrays.asList(params("fl","score,val_i"),
params("fl","score", "fl","val_i"))) {
doc = getRandClient(random()).getById(""+i, p);
assertEquals(p + " => " + doc, 1, doc.size());
assertTrue(p + " => " + doc, doc.getFieldValue("val_i") instanceof Integer);
}
}
}
public void testFunctions() throws Exception {
SolrDocumentList docs = assertSearch(params("q","*:*","rows","1","fl","log(val_i)"));
assertEquals(""+docs, 5, docs.getNumFound());
SolrDocument doc = docs.get(0); // doesn't really matter which one
assertEquals(""+doc, 1, doc.size());
assertTrue(""+doc, doc.getFieldValue("log(val_i)") instanceof Double);
for (SolrParams p : Arrays.asList(params("q","*:*", "rows", "1", "fl","log(val_i),abs(val_i)"),
params("q","*:*", "rows", "1", "fl","log(val_i)", "fl","abs(val_i)"))) {
docs = assertSearch(p);
assertEquals(p + " => " + docs, 5, docs.getNumFound());
doc = docs.get(0); // doesn't really matter which one
assertEquals(p + " => " + doc, 2, doc.size());
assertTrue(p + " => " + doc, doc.getFieldValue("log(val_i)") instanceof Double);
assertTrue(p + " => " + doc, doc.getFieldValue("abs(val_i)") instanceof Float);
}
}
public void testFunctionsRTG() throws Exception {
// if we use RTG (committed or otherwise) functions should behave the same
for (String id : Arrays.asList("42","99")) {
for (SolrParams p : Arrays.asList(params("fl","log(val_i),abs(val_i)"),
params("fl","log(val_i)","fl", "abs(val_i)"))) {
SolrDocument doc = getRandClient(random()).getById(id, p);
String msg = id + "," + p + " => " + doc;
assertEquals(msg, 2, doc.size());
assertTrue(msg, doc.getFieldValue("log(val_i)") instanceof Double);
assertTrue(msg, doc.getFieldValue("abs(val_i)") instanceof Float);
// true for both these specific docs
assertEquals(msg, 0.0D, doc.getFieldValue("log(val_i)"));
assertEquals(msg, 1.0F, doc.getFieldValue("abs(val_i)"));
}
}
}
public void testFunctionsAndExplicit() throws Exception {
for (SolrParams p : Arrays.asList(params("q","*:*", "rows", "1", "fl","log(val_i),val_i"),
params("q","*:*", "rows", "1", "fl","log(val_i)", "fl","val_i"))) {
SolrDocumentList docs = assertSearch(p);
assertEquals(p + " => " + docs, 5, docs.getNumFound());
SolrDocument doc = docs.get(0); // doesn't really matter which one
assertEquals(p + " => " + doc, 2, doc.size());
assertTrue(p + " => " + doc, doc.getFieldValue("log(val_i)") instanceof Double);
assertTrue(p + " => " + doc, doc.getFieldValue("val_i") instanceof Integer);
}
}
public void testFunctionsAndExplicitRTG() throws Exception {
// shouldn't matter if we use RTG (committed or otherwise)
for (String id : Arrays.asList("42","99")) {
for (SolrParams p : Arrays.asList(params("fl","log(val_i),val_i"),
params("fl","log(val_i)","fl","val_i"))) {
SolrDocument doc = getRandClient(random()).getById(id, p);
String msg = id + "," + p + " => " + doc;
assertEquals(msg, 2, doc.size());
assertTrue(msg, doc.getFieldValue("log(val_i)") instanceof Double);
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
// true for both these specific docs
assertEquals(msg, 0.0D, doc.getFieldValue("log(val_i)"));
assertEquals(msg, 1, doc.getFieldValue("val_i"));
}
}
}
public void testFunctionsAndScore() throws Exception {
for (SolrParams p : Arrays.asList(params("fl","log(val_i),score"),
params("fl","log(val_i)","fl","score"))) {
SolrDocumentList docs = assertSearch(SolrParams.wrapDefaults(p, params("q", "*:*", "rows", "10")));
assertEquals(p + " => " + docs, 5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
assertEquals(p + " => " + doc, 2, doc.size());
assertTrue(p + " => " + doc, doc.getFieldValue("score") instanceof Float);
assertTrue(p + " => " + doc, doc.getFieldValue("log(val_i)") instanceof Double);
}
}
for (SolrParams p : Arrays.asList(params("fl","log(val_i),abs(val_i),score"),
params("fl","log(val_i),abs(val_i)","fl","score"),
params("fl","log(val_i)","fl","abs(val_i),score"),
params("fl","log(val_i)","fl","abs(val_i)","fl","score"))) {
SolrDocumentList docs = assertSearch(SolrParams.wrapDefaults(p, params("q", "*:*", "rows", "10")));
assertEquals(p + " => " + docs, 5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
assertEquals(p + " => " + doc, 3, doc.size());
assertTrue(p + " => " + doc, doc.getFieldValue("score") instanceof Float);
assertTrue(p + " => " + doc, doc.getFieldValue("abs(val_i)") instanceof Float);
assertTrue(p + " => " + doc, doc.getFieldValue("log(val_i)") instanceof Double);
}
}
}
public void testFunctionsAndScoreRTG() throws Exception {
// if we use RTG (committed or otherwise) score should be ignored
for (String id : Arrays.asList("42","99")) {
for (SolrParams p : Arrays.asList(params("fl","score","fl","log(val_i)","fl","abs(val_i)"),
params("fl","score","fl","log(val_i),abs(val_i)"),
params("fl","score,log(val_i)","fl","abs(val_i)"),
params("fl","score,log(val_i),abs(val_i)"))) {
SolrDocument doc = getRandClient(random()).getById(id, p);
String msg = id + "," + p + " => " + doc;
assertEquals(msg, 2, doc.size());
assertTrue(msg, doc.getFieldValue("log(val_i)") instanceof Double);
assertTrue(msg, doc.getFieldValue("abs(val_i)") instanceof Float);
// true for both these specific docs
assertEquals(msg, 0.0D, doc.getFieldValue("log(val_i)"));
assertEquals(msg, 1.0F, doc.getFieldValue("abs(val_i)"));
}
}
}
public void testGlobs() throws Exception {
SolrDocumentList docs = assertSearch(params("q", "*:*", "rows", "10", "fl","val_*"));
assertEquals(5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
assertEquals(doc.toString(), 1, doc.size());
assertTrue(doc.toString(), doc.getFieldValue("val_i") instanceof Integer);
}
for (SolrParams p : Arrays.asList(params("q", "*:*", "rows", "10", "fl","val_*,subj*,ss*"),
params("q", "*:*", "rows", "10", "fl","val_*","fl","subj*,ss*"),
params("q", "*:*", "rows", "10", "fl","val_*","fl","subj*","fl","ss*"))) {
docs = assertSearch(p);
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
String msg = p + " => " + doc;
assertEquals(msg, 3, doc.size());
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertTrue(msg, doc.getFieldValue("subject") instanceof String);
assertTrue(msg, doc.getFieldValue("ssto") instanceof String); // TODO: val_ss: List<String>
assertEquals(msg, "X", doc.getFieldValue("ssto"));
}
}
}
public void testGlobsRTG() throws Exception {
// behavior shouldn't matter if we are committed or uncommitted
for (String id : Arrays.asList("42","99")) {
SolrDocument doc = getRandClient(random()).getById(id, params("fl","val_*"));
String msg = id + ": fl=val_* => " + doc;
assertEquals(msg, 1, doc.size());
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertEquals(msg, 1, doc.getFieldValue("val_i"));
for (SolrParams p : Arrays.asList(params("fl","val_*,subj*,ss*"),
params("fl","val_*","fl","subj*,ss*"))) {
doc = getRandClient(random()).getById(id, p);
msg = id + ": " + p + " => " + doc;
assertEquals(msg, 3, doc.size());
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertEquals(msg, 1, doc.getFieldValue("val_i"));
assertTrue(msg, doc.getFieldValue("subject") instanceof String);
// NOTE: 'subject' is diff between two docs
assertTrue(msg, doc.getFieldValue("ssto") instanceof String); // TODO: val_ss: List<String>
assertEquals(msg, "X", doc.getFieldValue("ssto"));
}
}
}
public void testGlobsAndExplicit() throws Exception {
SolrDocumentList docs = assertSearch(params("q", "*:*", "rows", "10", "fl","val_*,id"));
assertEquals(5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
assertEquals(doc.toString(), 2, doc.size());
assertTrue(doc.toString(), doc.getFieldValue("val_i") instanceof Integer);
assertTrue(doc.toString(), doc.getFieldValue("id") instanceof String);
}
for (SolrParams p : Arrays.asList(params("q", "*:*", "rows", "10", "fl","val_*,subj*,id"),
params("q", "*:*", "rows", "10", "fl","val_*","fl","subj*","fl","id"),
params("q", "*:*", "rows", "10", "fl","val_*","fl","subj*,id"))) {
docs = assertSearch(p);
assertEquals(p + " => " + docs, 5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
String msg = p + " => " + doc;
assertEquals(msg, 3, doc.size());
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertTrue(msg, doc.getFieldValue("subject") instanceof String);
assertTrue(msg, doc.getFieldValue("id") instanceof String);
}
}
}
public void testGlobsAndExplicitRTG() throws Exception {
// behavior shouldn't matter if we are committed or uncommitted
for (String id : Arrays.asList("42","99")) {
SolrDocument doc = getRandClient(random()).getById(id, params("fl","val_*,id"));
String msg = id + ": fl=val_*,id => " + doc;
assertEquals(msg, 2, doc.size());
assertTrue(msg, doc.getFieldValue("id") instanceof String);
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertEquals(msg, 1, doc.getFieldValue("val_i"));
for (SolrParams p : Arrays.asList(params("fl","val_*,subj*,id"),
params("fl","val_*","fl","subj*","fl","id"),
params("fl","val_*","fl","subj*,id"))) {
doc = getRandClient(random()).getById(id, p);
msg = id + ": " + p + " => " + doc;
assertEquals(msg, 3, doc.size());
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertEquals(msg, 1, doc.getFieldValue("val_i"));
assertTrue(msg, doc.getFieldValue("subject") instanceof String);
assertTrue(msg, doc.getFieldValue("id") instanceof String);
}
}
}
public void testGlobsAndScore() throws Exception {
SolrDocumentList docs = assertSearch(params("q", "*:*", "rows", "10", "fl","val_*,score"));
assertEquals(5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
assertEquals(doc.toString(), 2, doc.size());
assertTrue(doc.toString(), doc.getFieldValue("val_i") instanceof Integer);
assertTrue(doc.toString(), doc.getFieldValue("score") instanceof Float);
}
for (SolrParams p : Arrays.asList(params("q", "*:*", "rows", "10", "fl","val_*,subj*,score"),
params("q", "*:*", "rows", "10", "fl","val_*","fl","subj*","fl","score"),
params("q", "*:*", "rows", "10", "fl","val_*","fl","subj*,score"))) {
docs = assertSearch(p);
assertEquals(p + " => " + docs, 5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
String msg = p + " => " + doc;
assertEquals(msg, 3, doc.size());
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertTrue(msg, doc.getFieldValue("subject") instanceof String);
assertTrue(msg, doc.getFieldValue("score") instanceof Float);
}
}
}
public void testGlobsAndScoreRTG() throws Exception {
// behavior shouldn't matter if we are committed or uncommitted, score should be ignored
for (String id : Arrays.asList("42","99")) {
SolrDocument doc = getRandClient(random()).getById(id, params("fl","val_*,score"));
String msg = id + ": fl=val_*,score => " + doc;
assertEquals(msg, 1, doc.size());
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertEquals(msg, 1, doc.getFieldValue("val_i"));
for (SolrParams p : Arrays.asList(params("fl","val_*,subj*,score"),
params("fl","val_*","fl","subj*","fl","score"),
params("fl","val_*","fl","subj*,score"))) {
doc = getRandClient(random()).getById(id, p);
msg = id + ": " + p + " => " + doc;
assertEquals(msg, 2, doc.size());
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertEquals(msg, 1, doc.getFieldValue("val_i"));
assertTrue(msg, doc.getFieldValue("subject") instanceof String);
}
}
}
public void testAugmenters() throws Exception {
SolrDocumentList docs = assertSearch(params("q", "*:*", "rows", "10", "fl","[docid]"));
assertEquals(5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
assertEquals(doc.toString(), 1, doc.size());
assertTrue(doc.toString(), doc.getFieldValue("[docid]") instanceof Integer);
}
for (SolrParams p : Arrays.asList(params("q","*:*", "fl","[docid],[shard],[explain],x_alias:[value v=10 t=int]"),
params("q","*:*", "fl","[docid],[shard]","fl","[explain],x_alias:[value v=10 t=int]"),
params("q","*:*", "fl","[docid]","fl","[shard]","fl","[explain]","fl","x_alias:[value v=10 t=int]"))) {
docs = assertSearch(p);
assertEquals(p + " => " + docs, 5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
String msg = p + " => " + doc;
assertEquals(msg, 4, doc.size());
assertTrue(msg, doc.getFieldValue("[docid]") instanceof Integer);
assertTrue(msg, doc.getFieldValue("[shard]") instanceof String);
assertTrue(msg, doc.getFieldValue("[explain]") instanceof String);
assertTrue(msg, doc.getFieldValue("x_alias") instanceof Integer);
assertEquals(msg, 10, doc.getFieldValue("x_alias"));
}
}
}
public void testDocIdAugmenterRTG() throws Exception {
// for an uncommitted doc, we should get -1
for (String id : Arrays.asList("42","99")) {
SolrDocument doc = getRandClient(random()).getById(id, params("fl","[docid]"));
String msg = id + ": fl=[docid] => " + doc;
assertEquals(msg, 1, doc.size());
assertTrue(msg, doc.getFieldValue("[docid]") instanceof Integer);
assertTrue(msg, -1 <= ((Integer)doc.getFieldValue("[docid]")).intValue());
}
}
public void testAugmentersRTG() throws Exception {
// behavior shouldn't matter if we are committed or uncommitted
for (String id : Arrays.asList("42","99")) {
for (SolrParams p : Arrays.asList
(params("fl","[docid],[shard],[explain],x_alias:[value v=10 t=int]"),
params("fl","[docid],[shard]","fl","[explain],x_alias:[value v=10 t=int]"),
params("fl","[docid]","fl","[shard]","fl","[explain]","fl","x_alias:[value v=10 t=int]"))) {
SolrDocument doc = getRandClient(random()).getById(id, p);
String msg = id + ": " + p + " => " + doc;
assertEquals(msg, 3, doc.size());
assertTrue(msg, doc.getFieldValue("[shard]") instanceof String);
// RTG: [explain] should be ignored
assertTrue(msg, doc.getFieldValue("x_alias") instanceof Integer);
assertEquals(msg, 10, doc.getFieldValue("x_alias"));
assertTrue(msg, doc.getFieldValue("[docid]") instanceof Integer);
assertTrue(msg, -1 <= ((Integer)doc.getFieldValue("[docid]")).intValue());
}
}
}
public void testAugmentersAndExplicit() throws Exception {
for (SolrParams p : Arrays.asList(params("q", "*:*", "fl","id,[docid],[explain],x_alias:[value v=10 t=int]"),
params("q", "*:*", "fl","id","fl","[docid],[explain],x_alias:[value v=10 t=int]"),
params("q", "*:*", "fl","id","fl","[docid]","fl","[explain]","fl","x_alias:[value v=10 t=int]"))) {
SolrDocumentList docs = assertSearch(p);
assertEquals(p + " => " + docs, 5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
String msg = p + " => " + doc;
assertEquals(msg, 4, doc.size());
assertTrue(msg, doc.getFieldValue("id") instanceof String);
assertTrue(msg, doc.getFieldValue("[docid]") instanceof Integer);
assertTrue(msg, doc.getFieldValue("[explain]") instanceof String);
assertTrue(msg, doc.getFieldValue("x_alias") instanceof Integer);
assertEquals(msg, 10, doc.getFieldValue("x_alias"));
}
}
}
public void testAugmentersAndExplicitRTG() throws Exception {
// behavior shouldn't matter if we are committed or uncommitted
for (String id : Arrays.asList("42","99")) {
for (SolrParams p : Arrays.asList(params("fl","id,[docid],[explain],x_alias:[value v=10 t=int]"),
params("fl","id,[docid]","fl","[explain],x_alias:[value v=10 t=int]"),
params("fl","id","fl","[docid]","fl","[explain]","fl","x_alias:[value v=10 t=int]"))) {
SolrDocument doc = getRandClient(random()).getById(id, p);
String msg = id + ": " + p + " => " + doc;
assertEquals(msg, 3, doc.size());
assertTrue(msg, doc.getFieldValue("id") instanceof String);
// RTG: [explain] should be missing (ignored)
assertTrue(msg, doc.getFieldValue("x_alias") instanceof Integer);
assertEquals(msg, 10, doc.getFieldValue("x_alias"));
assertTrue(msg, doc.getFieldValue("[docid]") instanceof Integer);
assertTrue(msg, -1 <= ((Integer)doc.getFieldValue("[docid]")).intValue());
}
}
}
public void testAugmentersAndScore() throws Exception {
SolrParams params = params("q","*:*", "fl","[docid],x_alias:[value v=10 t=int],score");
SolrDocumentList docs = assertSearch(params);
assertEquals(params + " => " + docs, 5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
String msg = params + " => " + doc;
assertEquals(msg, 3, doc.size());
assertTrue(msg, doc.getFieldValue("[docid]") instanceof Integer);
assertTrue(msg, doc.getFieldValue("x_alias") instanceof Integer);
assertEquals(msg, 10, doc.getFieldValue("x_alias"));
assertTrue(msg, doc.getFieldValue("score") instanceof Float);
}
for (SolrParams p : Arrays.asList(params("q","*:*","fl","[docid],x_alias:[value v=10 t=int],[explain],score"),
params("q","*:*","fl","[docid]","fl","x_alias:[value v=10 t=int],[explain]","fl","score"),
params("q","*:*","fl","[docid]","fl","x_alias:[value v=10 t=int]","fl","[explain]","fl","score"))) {
docs = assertSearch(p);
assertEquals(p + " => " + docs, 5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
String msg = p + " => " + doc;
assertEquals(msg, 4, doc.size());
assertTrue(msg, doc.getFieldValue("[docid]") instanceof Integer);
assertTrue(msg, doc.getFieldValue("x_alias") instanceof Integer);
assertEquals(msg, 10, doc.getFieldValue("x_alias"));
assertTrue(msg, doc.getFieldValue("[explain]") instanceof String);
assertTrue(msg, doc.getFieldValue("score") instanceof Float);
}
}
}
public void testAugmentersAndScoreRTG() throws Exception {
// if we use RTG (committed or otherwise) score should be ignored
for (String id : Arrays.asList("42","99")) {
SolrDocument doc = getRandClient(random()).getById(id, params("fl","x_alias:[value v=10 t=int],score"));
String msg = id + " => " + doc;
assertEquals(msg, 1, doc.size());
assertTrue(msg, doc.getFieldValue("x_alias") instanceof Integer);
assertEquals(msg, 10, doc.getFieldValue("x_alias"));
for (SolrParams p : Arrays.asList(params("fl","d_alias:[docid],x_alias:[value v=10 t=int],[explain],score"),
params("fl","d_alias:[docid],x_alias:[value v=10 t=int],[explain]","fl","score"),
params("fl","d_alias:[docid]","fl","x_alias:[value v=10 t=int]","fl","[explain]","fl","score"))) {
doc = getRandClient(random()).getById(id, p);
msg = id + ": " + p + " => " + doc;
assertEquals(msg, 2, doc.size());
assertTrue(msg, doc.getFieldValue("x_alias") instanceof Integer);
assertEquals(msg, 10, doc.getFieldValue("x_alias"));
// RTG: [explain] and score should be missing (ignored)
assertTrue(msg, doc.getFieldValue("d_alias") instanceof Integer);
assertTrue(msg, -1 <= ((Integer)doc.getFieldValue("d_alias")).intValue());
}
}
}
public void testAugmentersGlobsExplicitAndScoreOhMy() throws Exception {
Random random = random();
// NOTE: 'ssto' is the missing one
final List<String> fl = Arrays.asList
("id","[docid]","[explain]","score","val_*","subj*");
final int iters = atLeast(random, 10);
for (int i = 0; i< iters; i++) {
Collections.shuffle(fl, random);
final SolrParams singleFl = params("q","*:*", "rows", "1","fl",StringUtils.join(fl.toArray(),','));
final ModifiableSolrParams multiFl = params("q","*:*", "rows", "1");
for (String item : fl) {
multiFl.add("fl",item);
}
for (SolrParams params : Arrays.asList(singleFl, multiFl)) {
SolrDocumentList docs = assertSearch(params);
assertEquals(params + " => " + docs, 5, docs.getNumFound());
// shouldn't matter what doc we pick...
for (SolrDocument doc : docs) {
String msg = params + " => " + doc;
assertEquals(msg, 6, doc.size());
assertTrue(msg, doc.getFieldValue("id") instanceof String);
assertTrue(msg, doc.getFieldValue("[docid]") instanceof Integer);
assertTrue(msg, doc.getFieldValue("[explain]") instanceof String);
assertTrue(msg, doc.getFieldValue("score") instanceof Float);
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertTrue(msg, doc.getFieldValue("subject") instanceof String);
}
}
}
}
public void testAugmentersGlobsExplicitAndScoreOhMyRTG() throws Exception {
Random random = random();
// NOTE: 'ssto' is the missing one
final List<String> fl = Arrays.asList
("id","[docid]","[explain]","score","val_*","subj*");
final int iters = atLeast(random, 10);
for (int i = 0; i< iters; i++) {
Collections.shuffle(fl, random);
final SolrParams singleFl = params("fl",StringUtils.join(fl.toArray(),','));
final ModifiableSolrParams multiFl = params();
for (String item : fl) {
multiFl.add("fl",item);
}
// RTG behavior should be consistent, (committed or otherwise)
for (String id : Arrays.asList("42","99")) {
for (SolrParams params : Arrays.asList(singleFl, multiFl)) {
SolrDocument doc = getRandClient(random()).getById(id, params);
String msg = id + ": " + params + " => " + doc;
assertEquals(msg, 4, doc.size());
assertTrue(msg, doc.getFieldValue("id") instanceof String);
assertTrue(msg, doc.getFieldValue("val_i") instanceof Integer);
assertEquals(msg, 1, doc.getFieldValue("val_i"));
assertTrue(msg, doc.getFieldValue("subject") instanceof String);
assertTrue(msg, doc.getFieldValue("[docid]") instanceof Integer);
assertTrue(msg, -1 <= ((Integer)doc.getFieldValue("[docid]")).intValue());
// RTG: [explain] and score should be missing (ignored)
}
}
}
}
/**
* Given a set of query params, executes as a Query against a random SolrClient and
* asserts that exactly one document is returned
*/
public static SolrDocument assertSearchOneDoc(SolrParams p) throws Exception {
SolrDocumentList docs = assertSearch(p);
assertEquals("does not match exactly one doc: " + p.toString() + " => " + docs.toString(),
1, docs.getNumFound());
assertEquals("does not contain exactly one doc: " + p.toString() + " => " + docs.toString(),
1, docs.size());
return docs.get(0);
}
/**
* Given a set of query params, executes as a Query against a random SolrClient and
* asserts that at least 1 doc is matched and at least 1 doc is returned
*/
public static SolrDocumentList assertSearch(SolrParams p) throws Exception {
QueryResponse rsp = getRandClient(random()).query(p);
assertEquals("failed request: " + p.toString() + " => " + rsp.toString(), 0, rsp.getStatus());
assertTrue("does not match at least one doc: " + p.toString() + " => " + rsp.toString(),
1 <= rsp.getResults().getNumFound());
assertTrue("rsp does not contain at least one doc: " + p.toString() + " => " + rsp.toString(),
1 <= rsp.getResults().size());
return rsp.getResults();
}
/**
* returns a random SolrClient -- either a CloudSolrClient, or an HttpSolrClient pointed
* at a node in our cluster
*/
public static SolrClient getRandClient(Random rand) {
int numClients = CLIENTS.size();
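// TestUtil.nextInt is inclusive of numClients; that extra index selects the CloudSolrClient
// rather than one of the per-node HttpSolrClients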
int idx = TestUtil.nextInt(rand, 0, numClients);
return (idx == numClients) ? CLOUD_CLIENT : CLIENTS.get(idx);
}
public static void waitForRecoveriesToFinish(CloudSolrClient client) throws Exception {
assert null != client.getDefaultCollection();
AbstractDistribZkTestBase.waitForRecoveriesToFinish(client.getDefaultCollection(),
client.getZkStateReader(),
true, true, 330);
}
}
| 1 | 28,010 | Doc size increased by 1 since _root_ is also returned by queries. | apache-lucene-solr | java |
@@ -822,8 +822,14 @@ def color_intervals(colors, levels, clip=None, N=255):
clmin, clmax = clip
lidx = int(round(N*((clmin-cmin)/interval)))
uidx = int(round(N*((cmax-clmax)/interval)))
- cmap = cmap[lidx:N-uidx]
- return cmap
+ uidx = N-uidx
+ if lidx == uidx:
+ uidx = lidx+1
+ cmap = cmap[lidx:uidx]
+ if clmin == clmax:
+ idx = np.argmin(np.abs(np.array(levels)-clmin))
+ clip = levels[idx: idx+2] if len(levels) > idx+2 else levels[idx-1: idx+1]
+ return cmap, clip
def dim_axis_label(dimensions, separator=', '): | 1 | from __future__ import unicode_literals, absolute_import, division
from collections import defaultdict, namedtuple
import traceback
import warnings
import bisect
import numpy as np
import param
from ..core import (HoloMap, DynamicMap, CompositeOverlay, Layout,
Overlay, GridSpace, NdLayout, Store, NdOverlay)
from ..core.options import Cycle
from ..core.spaces import get_nested_streams
from ..core.util import (match_spec, wrap_tuple, basestring, get_overlay_spec,
unique_iterator, closest_match)
from ..streams import LinkedStream
def displayable(obj):
"""
Predicate that returns whether the object is displayable or not
(i.e. whether the object obeys the nesting hierarchy)
"""
if isinstance(obj, Overlay) and any(isinstance(o, (HoloMap, GridSpace))
for o in obj):
return False
if isinstance(obj, HoloMap):
return not (obj.type in [Layout, GridSpace, NdLayout, DynamicMap])
if isinstance(obj, (GridSpace, Layout, NdLayout)):
for el in obj.values():
if not displayable(el):
return False
return True
return True
class Warning(param.Parameterized): pass
display_warning = Warning(name='Warning')
def collate(obj):
if isinstance(obj, Overlay):
nested_type = [type(o).__name__ for o in obj
if isinstance(o, (HoloMap, GridSpace))][0]
display_warning.warning("Nesting %ss within an Overlay makes it difficult "
"to access your data or control how it appears; "
"we recommend calling .collate() on the Overlay "
"in order to follow the recommended nesting "
"structure shown in the Composing Data tutorial"
"(http://goo.gl/2YS8LJ)" % nested_type)
return obj.collate()
if isinstance(obj, DynamicMap):
if obj.type in [DynamicMap, HoloMap]:
obj_name = obj.type.__name__
raise Exception("Nesting a %s inside a DynamicMap is not "
"supported. Ensure that the DynamicMap callback "
"returns an Element or (Nd)Overlay. If you have "
"applied an operation ensure it is not dynamic by "
"setting dynamic=False." % obj_name)
return obj.collate()
if isinstance(obj, HoloMap):
display_warning.warning("Nesting {0}s within a {1} makes it difficult "
"to access your data or control how it appears; "
"we recommend calling .collate() on the {1} "
"in order to follow the recommended nesting "
"structure shown in the Composing Data tutorial"
"(https://goo.gl/2YS8LJ)".format(obj.type.__name__, type(obj).__name__))
return obj.collate()
elif isinstance(obj, (Layout, NdLayout)):
try:
display_warning.warning(
"Layout contains HoloMaps which are not nested in the "
"recommended format for accessing your data; calling "
".collate() on these objects will resolve any violations "
"of the recommended nesting presented in the Composing Data "
"tutorial (https://goo.gl/2YS8LJ)")
expanded = []
for el in obj.values():
if isinstance(el, HoloMap) and not displayable(el):
collated_layout = Layout.from_values(el.collate())
expanded.extend(collated_layout.values())
return Layout(expanded)
except:
raise Exception(undisplayable_info(obj))
else:
raise Exception(undisplayable_info(obj))
def isoverlay_fn(obj):
"""
Determines whether object is a DynamicMap returning (Nd)Overlay types.
"""
return isinstance(obj, DynamicMap) and (isinstance(obj.last, CompositeOverlay))
def overlay_depth(obj):
"""
Computes the depth of a DynamicMap overlay if it can be determined
otherwise return None.
"""
if isinstance(obj, DynamicMap):
if isinstance(obj.last, CompositeOverlay):
return len(obj.last)
elif obj.last is None:
return None
return 1
else:
return 1
def compute_overlayable_zorders(obj, path=[]):
"""
Traverses an overlayable composite container to determine which
objects are associated with specific (Nd)Overlay layers by
z-order, making sure to take DynamicMap Callables into
account. Returns a mapping between the zorders of each layer and a
corresponding lists of objects.
Used to determine which overlaid subplots should be linked with
Stream callbacks.
"""
path = path+[obj]
zorder_map = defaultdict(list)
# Process non-dynamic layers
if not isinstance(obj, DynamicMap):
if isinstance(obj, CompositeOverlay):
for z, o in enumerate(obj):
zorder_map[z] = [o, obj]
elif isinstance(obj, HoloMap):
for el in obj.values():
if isinstance(el, CompositeOverlay):
for k, v in compute_overlayable_zorders(el, path).items():
zorder_map[k] += v + [obj]
else:
zorder_map[0] += [obj, el]
else:
if obj not in zorder_map[0]:
zorder_map[0].append(obj)
return zorder_map
isoverlay = isinstance(obj.last, CompositeOverlay)
isdynoverlay = obj.callback._is_overlay
if obj not in zorder_map[0] and not isoverlay:
zorder_map[0].append(obj)
depth = overlay_depth(obj)
# Process the inputs of the DynamicMap callback
dmap_inputs = obj.callback.inputs if obj.callback.link_inputs else []
for z, inp in enumerate(dmap_inputs):
no_zorder_increment = False
if any(not (isoverlay_fn(p) or p.last is None) for p in path) and isoverlay_fn(inp):
# If overlay has been collapsed do not increment zorder
no_zorder_increment = True
input_depth = overlay_depth(inp)
if depth is not None and input_depth is not None and depth < input_depth:
# Skips branch of graph where the number of elements in an
# overlay has been reduced but still contains more than one layer
if depth > 1:
continue
else:
no_zorder_increment = True
# Recurse into DynamicMap.callback.inputs and update zorder_map
z = z if isdynoverlay else 0
deep_zorders = compute_overlayable_zorders(inp, path=path)
offset = max(zorder_map.keys())
for dz, objs in deep_zorders.items():
global_z = offset+z if no_zorder_increment else offset+dz+z
zorder_map[global_z] = list(unique_iterator(zorder_map[global_z]+objs))
# If object branches but does not declare inputs (e.g. user defined
# DynamicMaps returning (Nd)Overlay) add the items on the DynamicMap.last
found = any(isinstance(p, DynamicMap) and p.callback._is_overlay for p in path)
linked = any(isinstance(s, LinkedStream) and s.linked for s in obj.streams)
if (found or linked) and isoverlay and not isdynoverlay:
offset = max(zorder_map.keys())
for z, o in enumerate(obj.last):
if isoverlay and linked:
zorder_map[offset+z].append(obj)
if o not in zorder_map[offset+z]:
zorder_map[offset+z].append(o)
return zorder_map
def is_dynamic_overlay(dmap):
"""
Traverses a DynamicMap graph and determines if any components
were overlaid dynamically (i.e. by * on a DynamicMap).
"""
if not isinstance(dmap, DynamicMap):
return False
elif dmap.callback._is_overlay:
return True
else:
return any(is_dynamic_overlay(dm) for dm in dmap.callback.inputs)
def split_dmap_overlay(obj, depth=0):
"""
Splits a DynamicMap into the original component layers it was
constructed from by traversing the graph to search for dynamically
overlaid components (i.e. constructed by using * on a DynamicMap).
Useful for assigning subplots of an OverlayPlot the streams that
are responsible for driving their updates. Allows the OverlayPlot
to determine if a stream update should redraw a particular
subplot.
"""
layers = []
if isinstance(obj, DynamicMap):
if issubclass(obj.type, NdOverlay) and not depth:
for v in obj.last.values():
layers.append(obj)
elif issubclass(obj.type, Overlay):
if obj.callback.inputs and is_dynamic_overlay(obj):
for inp in obj.callback.inputs:
layers += split_dmap_overlay(inp, depth+1)
else:
for v in obj.last.values():
layers.append(obj)
else:
layers.append(obj)
return layers
if isinstance(obj, Overlay):
for k, v in obj.items():
layers.append(v)
else:
layers.append(obj)
return layers
def initialize_dynamic(obj):
"""
Initializes all DynamicMap objects contained by the object
"""
dmaps = obj.traverse(lambda x: x, specs=[DynamicMap])
for dmap in dmaps:
if dmap.unbounded:
# Skip initialization until plotting code
continue
if not len(dmap):
dmap[dmap._initial_key()]
def get_plot_frame(map_obj, key_map, cached=False):
"""
    Returns an item in a HoloMap or DynamicMap given a mapping of key
dimensions and their values.
"""
if map_obj.kdims and len(map_obj.kdims) == 1 and map_obj.kdims[0] == 'Frame':
# Special handling for static plots
return map_obj.last
key = tuple(key_map[kd.name] for kd in map_obj.kdims if kd.name in key_map)
if key in map_obj.data and cached:
return map_obj.data[key]
else:
try:
return map_obj[key]
except KeyError:
return None
except StopIteration as e:
raise e
except Exception:
print(traceback.format_exc())
return None
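# Illustrative sketch (hypothetical helper; assumes holoviews is importable as
# ``hv``): look up the frame of a HoloMap matching a mapping of key-dimension
# values.
def _example_get_plot_frame():
    import holoviews as hv
    hmap = hv.HoloMap({i: hv.Curve([i, i + 1]) for i in range(3)}, kdims=['frame'])
    # Returns the Curve stored under frame=1, or None if no such key exists.
    return get_plot_frame(hmap, {'frame': 1})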
def undisplayable_info(obj, html=False):
"Generate helpful message regarding an undisplayable object"
collate = '<tt>collate</tt>' if html else 'collate'
info = "For more information, please consult the Composing Data tutorial (http://git.io/vtIQh)"
if isinstance(obj, HoloMap):
error = "HoloMap of %s objects cannot be displayed." % obj.type.__name__
remedy = "Please call the %s method to generate a displayable object" % collate
elif isinstance(obj, Layout):
error = "Layout containing HoloMaps of Layout or GridSpace objects cannot be displayed."
remedy = "Please call the %s method on the appropriate elements." % collate
elif isinstance(obj, GridSpace):
error = "GridSpace containing HoloMaps of Layouts cannot be displayed."
remedy = "Please call the %s method on the appropriate elements." % collate
if not html:
return '\n'.join([error, remedy, info])
else:
return "<center>{msg}</center>".format(msg=('<br>'.join(
['<b>%s</b>' % error, remedy, '<i>%s</i>' % info])))
def compute_sizes(sizes, size_fn, scaling_factor, scaling_method, base_size):
"""
Scales point sizes according to a scaling factor,
base size and size_fn, which will be applied before
scaling.
"""
if sizes.dtype.kind not in ('i', 'f'):
return None
if scaling_method == 'area':
pass
elif scaling_method == 'width':
scaling_factor = scaling_factor**2
else:
raise ValueError(
'Invalid value for argument "scaling_method": "{}". '
'Valid values are: "width", "area".'.format(scaling_method))
sizes = size_fn(sizes)
return (base_size*scaling_factor*sizes)
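# Illustrative sketch (hypothetical helper): with scaling_method='width' the
# scaling factor is squared before being applied to the transformed sizes.
def _example_compute_sizes():
    import numpy as np
    sizes = np.array([1.0, 4.0, 9.0])
    # base_size * 2**2 * sqrt(sizes) -> array([ 4.,  8., 12.])
    return compute_sizes(sizes, np.sqrt, scaling_factor=2,
                         scaling_method='width', base_size=1)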
def get_sideplot_ranges(plot, element, main, ranges):
"""
    Utility to find the range for an adjoined plot, given the
    adjoined plot, its element, the main Element it is adjoined
    to, and the dictionary of ranges.
"""
key = plot.current_key
dims = element.dimensions()
dim = dims[0] if 'frequency' in dims[1].name else dims[1]
range_item = main
if isinstance(main, HoloMap):
if issubclass(main.type, CompositeOverlay):
range_item = [hm for hm in main.split_overlays()[1]
if dim in hm.dimensions('all')][0]
else:
range_item = HoloMap({0: main}, kdims=['Frame'])
ranges = match_spec(range_item.last, ranges)
if dim.name in ranges:
main_range = ranges[dim.name]
else:
framewise = plot.lookup_options(range_item.last, 'norm').options.get('framewise')
if framewise and range_item.get(key, False):
main_range = range_item[key].range(dim)
else:
main_range = range_item.range(dim)
# If .main is an NdOverlay or a HoloMap of Overlays get the correct style
if isinstance(range_item, HoloMap):
range_item = range_item.last
if isinstance(range_item, CompositeOverlay):
range_item = [ov for ov in range_item
if dim in ov.dimensions('all')][0]
return range_item, main_range, dim
def within_range(range1, range2):
"""Checks whether range1 is within the range specified by range2."""
range1 = [r if np.isfinite(r) else None for r in range1]
range2 = [r if np.isfinite(r) else None for r in range2]
return ((range1[0] is None or range2[0] is None or range1[0] >= range2[0]) and
(range1[1] is None or range2[1] is None or range1[1] <= range2[1]))
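# Illustrative sketch (hypothetical helper): finite bounds must lie inside the
# outer range, while non-finite bounds are treated as unconstrained.
def _example_within_range():
    assert within_range((0, 5), (0, 10))
    assert not within_range((-1, 5), (0, 10))
    assert within_range((float('nan'), 5), (0, 10))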
def validate_unbounded_mode(holomaps, dynmaps):
composite = HoloMap(enumerate(holomaps), kdims=['testing_kdim'])
holomap_kdims = set(unique_iterator([kd.name for dm in holomaps for kd in dm.kdims]))
hmranges = {d: composite.range(d) for d in holomap_kdims}
if any(not set(d.name for d in dm.kdims) <= holomap_kdims
for dm in dynmaps):
        raise Exception('DynamicMaps that are unbounded must have key dimensions that are a '
'subset of dimensions of the HoloMap(s) defining the keys.')
elif not all(within_range(hmrange, dm.range(d)) for dm in dynmaps
for d, hmrange in hmranges.items() if d in dm.kdims):
raise Exception('HoloMap(s) have keys outside the ranges specified on '
'the DynamicMap(s).')
def get_dynamic_mode(composite):
"Returns the common mode of the dynamic maps in given composite object"
dynmaps = composite.traverse(lambda x: x, [DynamicMap])
holomaps = composite.traverse(lambda x: x, ['HoloMap'])
dynamic_unbounded = any(m.unbounded for m in dynmaps)
if holomaps:
validate_unbounded_mode(holomaps, dynmaps)
elif dynamic_unbounded and not holomaps:
raise Exception("DynamicMaps in unbounded mode must be displayed alongside "
"a HoloMap to define the sampling.")
return dynmaps and not holomaps, dynamic_unbounded
def initialize_unbounded(obj, dimensions, key):
"""
Initializes any DynamicMaps in unbounded mode.
"""
select = dict(zip([d.name for d in dimensions], key))
try:
obj.select([DynamicMap], **select)
except KeyError:
pass
def save_frames(obj, filename, fmt=None, backend=None, options=None):
"""
    Utility to export an object to files frame by frame, numbered individually.
Will use default backend and figure format by default.
"""
backend = Store.current_backend if backend is None else backend
renderer = Store.renderers[backend]
fmt = renderer.params('fig').objects[0] if fmt is None else fmt
plot = renderer.get_plot(obj)
for i in range(len(plot)):
plot.update(i)
renderer.save(plot, '%s_%s' % (filename, i), fmt=fmt, options=options)
def dynamic_update(plot, subplot, key, overlay, items):
"""
Given a plot, subplot and dynamically generated (Nd)Overlay
find the closest matching Element for that plot.
"""
match_spec = get_overlay_spec(overlay,
wrap_tuple(key),
subplot.current_frame)
specs = [(i, get_overlay_spec(overlay, wrap_tuple(k), el))
for i, (k, el) in enumerate(items)]
closest = closest_match(match_spec, specs)
if closest is None:
return closest, None, False
matched = specs[closest][1]
return closest, matched, match_spec == matched
def map_colors(arr, crange, cmap, hex=True):
"""
Maps an array of values to RGB hex strings, given
a color range and colormap.
"""
if isinstance(crange, np.ndarray):
xsorted = np.argsort(crange)
ypos = np.searchsorted(crange, arr)
arr = xsorted[ypos]
else:
if isinstance(crange, tuple):
cmin, cmax = crange
else:
cmin, cmax = np.nanmin(arr), np.nanmax(arr)
arr = (arr - cmin) / (cmax-cmin)
arr = np.ma.array(arr, mask=np.logical_not(np.isfinite(arr)))
arr = cmap(arr)
if hex:
return rgb2hex(arr)
else:
return arr
def mplcmap_to_palette(cmap, ncolors=None, categorical=False):
"""
    Converts a matplotlib colormap to a palette of RGB hex strings.
"""
from matplotlib.colors import Colormap, ListedColormap
ncolors = ncolors or 256
if not isinstance(cmap, Colormap):
import matplotlib.cm as cm
# Alias bokeh Category cmaps with mpl tab cmaps
if cmap.startswith('Category'):
cmap = cmap.replace('Category', 'tab')
try:
cmap = cm.get_cmap(cmap)
except:
cmap = cm.get_cmap(cmap.lower())
if isinstance(cmap, ListedColormap):
if categorical:
palette = [rgb2hex(cmap.colors[i%cmap.N]) for i in range(ncolors)]
return palette
elif cmap.N > ncolors:
palette = [rgb2hex(c) for c in cmap(np.arange(cmap.N))]
if len(palette) != ncolors:
palette = [palette[int(v)] for v in np.linspace(0, len(palette)-1, ncolors)]
return palette
return [rgb2hex(c) for c in cmap(np.linspace(0, 1, ncolors))]
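# Illustrative sketch (hypothetical helper; requires matplotlib, and 'viridis'
# is only an example name): sample a named colormap down to five hex colors.
def _example_mplcmap_to_palette():
    return mplcmap_to_palette('viridis', ncolors=5)
    # -> five '#rrggbb' strings sampled evenly across the colormap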
def bokeh_palette_to_palette(cmap, ncolors=None, categorical=False):
from bokeh import palettes
# Handle categorical colormaps to avoid interpolation
categories = ['accent', 'category', 'dark', 'colorblind', 'pastel',
'set1', 'set2', 'set3', 'paired']
cmap_categorical = any(cat in cmap.lower() for cat in categories)
reverse = False
if cmap.endswith('_r'):
cmap = cmap[:-2]
reverse = True
# Some colormaps are inverted compared to matplotlib
inverted = (not cmap_categorical and not cmap.capitalize() in palettes.mpl)
if inverted:
        reverse = not reverse
ncolors = ncolors or 256
# Alias mpl tab cmaps with bokeh Category cmaps
if cmap.startswith('tab'):
cmap = cmap.replace('tab', 'Category')
# Process as bokeh palette
palette = getattr(palettes, cmap, getattr(palettes, cmap.capitalize(), None))
if palette is None:
raise ValueError("Supplied palette %s not found among bokeh palettes" % cmap)
elif isinstance(palette, dict) and (cmap in palette or cmap.capitalize() in palette):
# Some bokeh palettes are doubly nested
palette = palette.get(cmap, palette.get(cmap.capitalize()))
if isinstance(palette, dict):
palette = palette[max(palette)]
if not cmap_categorical:
if len(palette) < ncolors:
palette = polylinear_gradient(palette, ncolors)
elif callable(palette):
palette = palette(ncolors)
if reverse: palette = palette[::-1]
if len(palette) != ncolors:
if categorical and cmap_categorical:
palette = [palette[i%len(palette)] for i in range(ncolors)]
else:
lpad, rpad = -0.5, 0.49999999999
indexes = np.linspace(lpad, (len(palette)-1)+rpad, ncolors)
palette = [palette[int(np.round(v))] for v in indexes]
return palette
def linear_gradient(start_hex, finish_hex, n=10):
"""
    Interpolates the color gradient between two hex colors.
"""
s = hex2rgb(start_hex)
f = hex2rgb(finish_hex)
gradient = [s]
for t in range(1, n):
curr_vector = [int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3)]
gradient.append(curr_vector)
return [rgb2hex([c/255. for c in rgb]) for rgb in gradient]
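# Illustrative sketch (hypothetical helper): three steps from black to white
# yield evenly spaced greys.
def _example_linear_gradient():
    return linear_gradient('#000000', '#ffffff', n=3)
    # -> ['#000000', '#7f7f7f', '#ffffff']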
def polylinear_gradient(colors, n):
"""
Interpolates the color gradients between a list of hex colors.
"""
n_out = int(float(n) / (len(colors)-1))
gradient = linear_gradient(colors[0], colors[1], n_out)
if len(colors) == len(gradient):
return gradient
for col in range(1, len(colors) - 1):
next_colors = linear_gradient(colors[col], colors[col+1], n_out+1)
gradient += next_colors[1:] if len(next_colors) > 1 else next_colors
return gradient
cmap_info=[]
CMapInfo=namedtuple('CMapInfo',['name','provider','category','source','bg'])
providers = ['matplotlib', 'bokeh', 'colorcet']
def _list_cmaps(provider=None, records=False):
"""
List available colormaps by combining matplotlib, bokeh, and
colorcet colormaps or palettes if available. May also be
narrowed down to a particular provider or list of providers.
"""
if provider is None:
provider = providers
elif isinstance(provider, basestring):
if provider not in providers:
raise ValueError('Colormap provider %r not recognized, must '
'be one of %r' % (provider, providers))
provider = [provider]
cmaps = []
def info(provider,names):
return [CMapInfo(name=n,provider=provider,category=None,source=None,bg=None) for n in names] \
if records else list(names)
if 'matplotlib' in provider:
try:
import matplotlib.cm as cm
cmaps += info('matplotlib',
[cmap for cmap in cm.cmap_d if not
(cmap.startswith('cet_') or # duplicates list below
cmap.startswith('Vega') or # deprecated in matplotlib=2.1
cmap.startswith('spectral') )]) # deprecated in matplotlib=2.1
except:
pass
if 'bokeh' in provider:
try:
from bokeh import palettes
cmaps += info('bokeh', palettes.all_palettes)
cmaps += info('bokeh', [p+'_r' for p in palettes.all_palettes])
except:
pass
if 'colorcet' in provider:
try:
from colorcet import palette_n
cmaps += info('colorcet', palette_n)
cmaps += info('colorcet', [p+'_r' for p in palette_n])
except:
pass
return sorted(unique_iterator(cmaps))
def register_cmaps(category, provider, source, bg, names):
"""
Maintain descriptions of colormaps that include the following information:
name - string name for the colormap
category - intended use or purpose, mostly following matplotlib
provider - package providing the colormap directly
source - original source or creator of the colormaps
bg - base/background color expected for the map
('light','dark','medium','any' (unknown or N/A))
"""
for name in names:
bisect.insort(cmap_info, CMapInfo(name=name, provider=provider,
category=category, source=source,
bg=bg))
def list_cmaps(provider=None, records=False, name=None, category=None, source=None,
bg=None, reverse=None):
"""
Return colormap names matching the specified filters.
"""
# Only uses names actually imported and currently available
available = _list_cmaps(provider=provider, records=True)
matches = set()
for avail in available:
aname=avail.name
matched=False
basename=aname[:-2] if aname.endswith('_r') else aname
if (reverse is None or
(reverse==True and aname.endswith('_r')) or
(reverse==False and not aname.endswith('_r'))):
for r in cmap_info:
if (r.name==basename):
matched=True
# cmap_info stores only non-reversed info, so construct
# suitable values for reversed version if appropriate
r=r._replace(name=aname)
                    if aname.endswith('_r') and (r.category != 'Diverging'):
if r.bg=='light':
r=r._replace(bg='dark')
elif r.bg=='dark':
r=r._replace(bg='light')
if (( name is None or name in r.name) and
(provider is None or provider in r.provider) and
(category is None or category in r.category) and
( source is None or source in r.source) and
( bg is None or bg in r.bg)):
matches.add(r)
if not matched and (category is None or category=='Miscellaneous'):
# Return colormaps that exist but are not found in cmap_info
# under the 'Miscellaneous' category, with no source or bg
r = CMapInfo(aname,provider=avail.provider,category='Miscellaneous',source=None,bg=None)
matches.add(r)
# Return results sorted by category if category information is provided
if records:
return list(unique_iterator(sorted(matches,
key=lambda r: (r.category.split(" ")[-1],r.bg,r.name.lower(),r.provider,r.source))))
else:
return list(unique_iterator(sorted([rec.name for rec in matches], key=lambda n:n.lower())))
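# Illustrative sketch (hypothetical helper): query the registry by provider and
# category; the concrete result depends on which providers are installed, so
# the comment below is indicative only.
def _example_list_cmaps():
    return list_cmaps(provider='matplotlib', category='Uniform Sequential', reverse=False)
    # e.g. ['cividis', 'inferno', 'magma', 'plasma', 'viridis']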
register_cmaps('Uniform Sequential', 'matplotlib', 'bids', 'dark',
['viridis', 'plasma', 'inferno', 'magma', 'cividis'])
register_cmaps('Mono Sequential', 'matplotlib', 'colorbrewer', 'light',
['Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'])
register_cmaps('Other Sequential', 'matplotlib', 'misc', 'light',
['gist_yarg', 'binary'])
register_cmaps('Other Sequential', 'matplotlib', 'misc', 'dark',
['afmhot', 'gray', 'bone', 'gist_gray', 'gist_heat',
'hot', 'pink'])
register_cmaps('Other Sequential', 'matplotlib', 'misc', 'any',
['copper', 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia'])
register_cmaps('Diverging', 'matplotlib', 'colorbrewer', 'light',
['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy',
'RdYlBu', 'RdYlGn', 'Spectral'])
register_cmaps('Diverging', 'matplotlib', 'misc', 'light',
['coolwarm', 'bwr', 'seismic'])
register_cmaps('Categorical', 'matplotlib', 'colorbrewer', 'any',
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2',
'Set1', 'Set2', 'Set3'])
register_cmaps('Categorical', 'matplotlib', 'd3', 'any',
['tab10', 'tab20', 'tab20b', 'tab20c'])
register_cmaps('Rainbow', 'matplotlib', 'misc', 'dark',
['nipy_spectral', 'gist_ncar'])
register_cmaps('Rainbow', 'matplotlib', 'misc', 'any',
['brg', 'hsv', 'gist_rainbow', 'rainbow', 'jet'])
register_cmaps('Miscellaneous', 'matplotlib', 'misc', 'dark',
['CMRmap', 'cubehelix', 'gist_earth', 'gist_stern',
'gnuplot', 'gnuplot2', 'ocean', 'terrain'])
register_cmaps('Miscellaneous', 'matplotlib', 'misc', 'any',
['flag', 'prism'])
register_cmaps('Uniform Sequential', 'colorcet', 'cet', 'dark',
['bgyw', 'bgy', 'kbc', 'bmw', 'bmy', 'kgy', 'gray',
'dimgray', 'fire'])
register_cmaps('Uniform Sequential', 'colorcet', 'cet', 'any',
['blues', 'kr', 'kg', 'kb'])
register_cmaps('Uniform Diverging', 'colorcet', 'cet', 'light',
['coolwarm', 'gwv'])
register_cmaps('Uniform Diverging', 'colorcet', 'cet', 'dark',
['bkr', 'bky'])
register_cmaps('Uniform Diverging', 'colorcet', 'cet', 'medium',
['bjy'])
register_cmaps('Uniform Rainbow', 'colorcet', 'cet', 'any',
['rainbow', 'colorwheel','isolum'])
register_cmaps('Uniform Sequential', 'bokeh', 'bids', 'dark',
['Viridis', 'Plasma', 'Inferno', 'Magma'])
register_cmaps('Mono Sequential', 'bokeh', 'colorbrewer', 'light',
['Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys',
'OrRd', 'Oranges', 'PuBu', 'PuBuGn', 'PuRd', 'Purples',
'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd'])
register_cmaps('Diverging', 'bokeh', 'colorbrewer', 'light',
['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy',
'RdYlBu', 'RdYlGn', 'Spectral'])
register_cmaps('Categorical', 'bokeh', 'd3', 'any',
['Category10', 'Category20', 'Category20b', 'Category20c'])
register_cmaps('Categorical', 'bokeh', 'colorbrewer', 'any',
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2',
'Set1', 'Set2', 'Set3'])
register_cmaps('Categorical', 'bokeh', 'misc', 'any',
['Colorblind'])
def process_cmap(cmap, ncolors=None, provider=None, categorical=False):
"""
Convert valid colormap specifications to a list of colors.
"""
providers_checked="matplotlib, bokeh, or colorcet" if provider is None else provider
if isinstance(cmap, Cycle):
palette = [rgb2hex(c) if isinstance(c, tuple) else c for c in cmap.values]
elif isinstance(cmap, list):
palette = cmap
elif isinstance(cmap, basestring):
mpl_cmaps = _list_cmaps('matplotlib')
bk_cmaps = _list_cmaps('bokeh')
cet_cmaps = _list_cmaps('colorcet')
if provider=='matplotlib' or (provider is None and (cmap in mpl_cmaps or cmap.lower() in mpl_cmaps)):
palette = mplcmap_to_palette(cmap, ncolors, categorical)
elif provider=='bokeh' or (provider is None and (cmap in bk_cmaps or cmap.capitalize() in bk_cmaps)):
palette = bokeh_palette_to_palette(cmap, ncolors, categorical)
elif provider=='colorcet' or (provider is None and cmap in cet_cmaps):
from colorcet import palette
if cmap.endswith('_r'):
palette = list(reversed(palette[cmap[:-2]]))
else:
palette = palette[cmap]
else:
raise ValueError("Supplied cmap %s not found among %s colormaps." %
(cmap,providers_checked))
else:
try:
# Try processing as matplotlib colormap
palette = mplcmap_to_palette(cmap, ncolors)
except:
palette = None
if not isinstance(palette, list):
raise TypeError("cmap argument %s expects a list, Cycle or valid %s colormap or palette."
% (cmap,providers_checked))
if ncolors and len(palette) != ncolors:
return [palette[i%len(palette)] for i in range(ncolors)]
return palette
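# Illustrative sketch (hypothetical helper): an explicit list of colors passes
# straight through and is cycled out to the requested length.
def _example_process_cmap():
    return process_cmap(['#ff0000', '#00ff00'], ncolors=4)
    # -> ['#ff0000', '#00ff00', '#ff0000', '#00ff00']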
def color_intervals(colors, levels, clip=None, N=255):
"""
    Maps a set of intervals to colors given a fixed color range; when clip is
    supplied the returned list of colors is trimmed to that sub-range.
"""
if len(colors) != len(levels)-1:
raise ValueError('The number of colors in the colormap '
'must match the intervals defined in the '
'color_levels, expected %d colors found %d.'
                         % (len(levels)-1, len(colors)))
intervals = np.diff(levels)
cmin, cmax = min(levels), max(levels)
interval = cmax-cmin
cmap = []
for intv, c in zip(intervals, colors):
cmap += [c]*int(round(N*(intv/interval)))
if clip is not None:
clmin, clmax = clip
lidx = int(round(N*((clmin-cmin)/interval)))
uidx = int(round(N*((cmax-clmax)/interval)))
cmap = cmap[lidx:N-uidx]
return cmap
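# Illustrative sketch (hypothetical helper): two equally wide intervals split
# the N samples evenly between the two colors.
def _example_color_intervals():
    return color_intervals(['#0000ff', '#ff0000'], levels=[0.0, 5.0, 10.0], N=10)
    # -> five '#0000ff' entries followed by five '#ff0000' entries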
def dim_axis_label(dimensions, separator=', '):
"""
Returns an axis label for one or more dimensions.
"""
if not isinstance(dimensions, list): dimensions = [dimensions]
return separator.join([d.pprint_label for d in dimensions])
def attach_streams(plot, obj, precedence=1.1):
"""
Attaches plot refresh to all streams on the object.
"""
def append_refresh(dmap):
for stream in get_nested_streams(dmap):
if plot.refresh not in stream._subscribers:
stream.add_subscriber(plot.refresh, precedence)
return obj.traverse(append_refresh, [DynamicMap])
def traverse_setter(obj, attribute, value):
"""
    Traverses the object and sets the supplied attribute to the given value
    on every object visited. Supports Dimensioned and DimensionedPlot types.
"""
obj.traverse(lambda x: setattr(x, attribute, value))
def _get_min_distance_numpy(element):
"""
NumPy based implementation of get_min_distance
"""
xys = element.array([0, 1])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered in')
xys = xys.astype('float32').view(np.complex64)
distances = np.abs(xys.T-xys)
np.fill_diagonal(distances, np.inf)
distances = distances[distances>0]
if len(distances):
return distances.min()
return 0
def get_min_distance(element):
"""
Gets the minimum sampling distance of the x- and y-coordinates
in a grid.
"""
try:
from scipy.spatial.distance import pdist
return pdist(element.array([0, 1])).min()
except:
return _get_min_distance_numpy(element)
def rgb2hex(rgb):
"""
Convert RGB(A) tuple to hex.
"""
if len(rgb) > 3:
rgb = rgb[:-1]
return "#{0:02x}{1:02x}{2:02x}".format(*(int(v*255) for v in rgb))
def hex2rgb(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
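# Illustrative sketch (hypothetical helper): round-trip between fractional RGB
# tuples and hex strings using the two converters above.
def _example_color_conversion():
    assert rgb2hex((1, 0.5, 0)) == '#ff7f00'
    assert hex2rgb('#ff7f00') == [255, 127, 0]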
COLOR_ALIASES = {
'b': (0, 0, 1),
'c': (0, 0.75, 0.75),
'g': (0, 0.5, 0),
'k': (0, 0, 0),
'm': (0.75, 0, 0.75),
'r': (1, 0, 0),
'w': (1, 1, 1),
'y': (0.75, 0.75, 0),
'transparent': (0, 0, 0, 0)
}
# linear_kryw_0_100_c71 (aka "fire"):
# A perceptually uniform equivalent of matplotlib's "hot" colormap, from
# http://peterkovesi.com/projects/colourmaps
fire_colors = linear_kryw_0_100_c71 = [\
[0, 0, 0 ], [0.027065, 2.143e-05, 0 ],
[0.052054, 7.4728e-05, 0 ], [0.071511, 0.00013914, 0 ],
[0.08742, 0.0002088, 0 ], [0.10109, 0.00028141, 0 ],
[0.11337, 0.000356, 2.4266e-17], [0.12439, 0.00043134, 3.3615e-17],
[0.13463, 0.00050796, 2.1604e-17], [0.14411, 0.0005856, 0 ],
[0.15292, 0.00070304, 0 ], [0.16073, 0.0013432, 0 ],
[0.16871, 0.0014516, 0 ], [0.17657, 0.0012408, 0 ],
[0.18364, 0.0015336, 0 ], [0.19052, 0.0017515, 0 ],
[0.19751, 0.0015146, 0 ], [0.20401, 0.0015249, 0 ],
[0.20994, 0.0019639, 0 ], [0.21605, 0.002031, 0 ],
[0.22215, 0.0017559, 0 ], [0.22808, 0.001546, 1.8755e-05],
[0.23378, 0.0016315, 3.5012e-05], [0.23955, 0.0017194, 3.3352e-05],
[0.24531, 0.0018097, 1.8559e-05], [0.25113, 0.0019038, 1.9139e-05],
[0.25694, 0.0020015, 3.5308e-05], [0.26278, 0.0021017, 3.2613e-05],
[0.26864, 0.0022048, 2.0338e-05], [0.27451, 0.0023119, 2.2453e-05],
[0.28041, 0.0024227, 3.6003e-05], [0.28633, 0.0025363, 2.9817e-05],
[0.29229, 0.0026532, 1.9559e-05], [0.29824, 0.0027747, 2.7666e-05],
[0.30423, 0.0028999, 3.5752e-05], [0.31026, 0.0030279, 2.3231e-05],
[0.31628, 0.0031599, 1.2902e-05], [0.32232, 0.0032974, 3.2915e-05],
[0.32838, 0.0034379, 3.2803e-05], [0.33447, 0.0035819, 2.0757e-05],
[0.34057, 0.003731, 2.3831e-05], [0.34668, 0.0038848, 3.502e-05 ],
[0.35283, 0.0040418, 2.4468e-05], [0.35897, 0.0042032, 1.1444e-05],
[0.36515, 0.0043708, 3.2793e-05], [0.37134, 0.0045418, 3.012e-05 ],
[0.37756, 0.0047169, 1.4846e-05], [0.38379, 0.0048986, 2.796e-05 ],
[0.39003, 0.0050848, 3.2782e-05], [0.3963, 0.0052751, 1.9244e-05],
[0.40258, 0.0054715, 2.2667e-05], [0.40888, 0.0056736, 3.3223e-05],
[0.41519, 0.0058798, 2.159e-05 ], [0.42152, 0.0060922, 1.8214e-05],
[0.42788, 0.0063116, 3.2525e-05], [0.43424, 0.0065353, 2.2247e-05],
[0.44062, 0.006765, 1.5852e-05], [0.44702, 0.0070024, 3.1769e-05],
[0.45344, 0.0072442, 2.1245e-05], [0.45987, 0.0074929, 1.5726e-05],
[0.46631, 0.0077499, 3.0976e-05], [0.47277, 0.0080108, 1.8722e-05],
[0.47926, 0.0082789, 1.9285e-05], [0.48574, 0.0085553, 3.0063e-05],
[0.49225, 0.0088392, 1.4313e-05], [0.49878, 0.0091356, 2.3404e-05],
[0.50531, 0.0094374, 2.8099e-05], [0.51187, 0.0097365, 6.4695e-06],
[0.51844, 0.010039, 2.5791e-05], [0.52501, 0.010354, 2.4393e-05],
[0.53162, 0.010689, 1.6037e-05], [0.53825, 0.011031, 2.7295e-05],
[0.54489, 0.011393, 1.5848e-05], [0.55154, 0.011789, 2.3111e-05],
[0.55818, 0.012159, 2.5416e-05], [0.56485, 0.012508, 1.5064e-05],
[0.57154, 0.012881, 2.541e-05 ], [0.57823, 0.013283, 1.6166e-05],
[0.58494, 0.013701, 2.263e-05 ], [0.59166, 0.014122, 2.3316e-05],
[0.59839, 0.014551, 1.9432e-05], [0.60514, 0.014994, 2.4323e-05],
[0.6119, 0.01545, 1.3929e-05], [0.61868, 0.01592, 2.1615e-05],
[0.62546, 0.016401, 1.5846e-05], [0.63226, 0.016897, 2.0838e-05],
[0.63907, 0.017407, 1.9549e-05], [0.64589, 0.017931, 2.0961e-05],
[0.65273, 0.018471, 2.0737e-05], [0.65958, 0.019026, 2.0621e-05],
[0.66644, 0.019598, 2.0675e-05], [0.67332, 0.020187, 2.0301e-05],
[0.68019, 0.020793, 2.0029e-05], [0.68709, 0.021418, 2.0088e-05],
[0.69399, 0.022062, 1.9102e-05], [0.70092, 0.022727, 1.9662e-05],
[0.70784, 0.023412, 1.7757e-05], [0.71478, 0.024121, 1.8236e-05],
[0.72173, 0.024852, 1.4944e-05], [0.7287, 0.025608, 2.0245e-06],
[0.73567, 0.02639, 1.5013e-07], [0.74266, 0.027199, 0 ],
[0.74964, 0.028038, 0 ], [0.75665, 0.028906, 0 ],
[0.76365, 0.029806, 0 ], [0.77068, 0.030743, 0 ],
[0.77771, 0.031711, 0 ], [0.78474, 0.032732, 0 ],
[0.79179, 0.033741, 0 ], [0.79886, 0.034936, 0 ],
[0.80593, 0.036031, 0 ], [0.81299, 0.03723, 0 ],
[0.82007, 0.038493, 0 ], [0.82715, 0.039819, 0 ],
[0.83423, 0.041236, 0 ], [0.84131, 0.042647, 0 ],
[0.84838, 0.044235, 0 ], [0.85545, 0.045857, 0 ],
[0.86252, 0.047645, 0 ], [0.86958, 0.049578, 0 ],
[0.87661, 0.051541, 0 ], [0.88365, 0.053735, 0 ],
[0.89064, 0.056168, 0 ], [0.89761, 0.058852, 0 ],
[0.90451, 0.061777, 0 ], [0.91131, 0.065281, 0 ],
[0.91796, 0.069448, 0 ], [0.92445, 0.074684, 0 ],
[0.93061, 0.08131, 0 ], [0.93648, 0.088878, 0 ],
[0.94205, 0.097336, 0 ], [0.9473, 0.10665, 0 ],
[0.9522, 0.1166, 0 ], [0.95674, 0.12716, 0 ],
[0.96094, 0.13824, 0 ], [0.96479, 0.14963, 0 ],
[0.96829, 0.16128, 0 ], [0.97147, 0.17303, 0 ],
[0.97436, 0.18489, 0 ], [0.97698, 0.19672, 0 ],
[0.97934, 0.20846, 0 ], [0.98148, 0.22013, 0 ],
[0.9834, 0.23167, 0 ], [0.98515, 0.24301, 0 ],
[0.98672, 0.25425, 0 ], [0.98815, 0.26525, 0 ],
[0.98944, 0.27614, 0 ], [0.99061, 0.28679, 0 ],
[0.99167, 0.29731, 0 ], [0.99263, 0.30764, 0 ],
[0.9935, 0.31781, 0 ], [0.99428, 0.3278, 0 ],
[0.995, 0.33764, 0 ], [0.99564, 0.34735, 0 ],
[0.99623, 0.35689, 0 ], [0.99675, 0.3663, 0 ],
[0.99722, 0.37556, 0 ], [0.99765, 0.38471, 0 ],
[0.99803, 0.39374, 0 ], [0.99836, 0.40265, 0 ],
[0.99866, 0.41145, 0 ], [0.99892, 0.42015, 0 ],
[0.99915, 0.42874, 0 ], [0.99935, 0.43724, 0 ],
[0.99952, 0.44563, 0 ], [0.99966, 0.45395, 0 ],
[0.99977, 0.46217, 0 ], [0.99986, 0.47032, 0 ],
[0.99993, 0.47838, 0 ], [0.99997, 0.48638, 0 ],
[1, 0.4943, 0 ], [1, 0.50214, 0 ],
[1, 0.50991, 1.2756e-05], [1, 0.51761, 4.5388e-05],
[1, 0.52523, 9.6977e-05], [1, 0.5328, 0.00016858],
[1, 0.54028, 0.0002582 ], [1, 0.54771, 0.00036528],
[1, 0.55508, 0.00049276], [1, 0.5624, 0.00063955],
[1, 0.56965, 0.00080443], [1, 0.57687, 0.00098902],
[1, 0.58402, 0.0011943 ], [1, 0.59113, 0.0014189 ],
[1, 0.59819, 0.0016626 ], [1, 0.60521, 0.0019281 ],
[1, 0.61219, 0.0022145 ], [1, 0.61914, 0.0025213 ],
[1, 0.62603, 0.0028496 ], [1, 0.6329, 0.0032006 ],
[1, 0.63972, 0.0035741 ], [1, 0.64651, 0.0039701 ],
[1, 0.65327, 0.0043898 ], [1, 0.66, 0.0048341 ],
[1, 0.66669, 0.005303 ], [1, 0.67336, 0.0057969 ],
[1, 0.67999, 0.006317 ], [1, 0.68661, 0.0068648 ],
[1, 0.69319, 0.0074406 ], [1, 0.69974, 0.0080433 ],
[1, 0.70628, 0.0086756 ], [1, 0.71278, 0.0093486 ],
[1, 0.71927, 0.010023 ], [1, 0.72573, 0.010724 ],
[1, 0.73217, 0.011565 ], [1, 0.73859, 0.012339 ],
[1, 0.74499, 0.01316 ], [1, 0.75137, 0.014042 ],
[1, 0.75772, 0.014955 ], [1, 0.76406, 0.015913 ],
[1, 0.77039, 0.016915 ], [1, 0.77669, 0.017964 ],
[1, 0.78298, 0.019062 ], [1, 0.78925, 0.020212 ],
[1, 0.7955, 0.021417 ], [1, 0.80174, 0.02268 ],
[1, 0.80797, 0.024005 ], [1, 0.81418, 0.025396 ],
[1, 0.82038, 0.026858 ], [1, 0.82656, 0.028394 ],
[1, 0.83273, 0.030013 ], [1, 0.83889, 0.031717 ],
[1, 0.84503, 0.03348 ], [1, 0.85116, 0.035488 ],
[1, 0.85728, 0.037452 ], [1, 0.8634, 0.039592 ],
[1, 0.86949, 0.041898 ], [1, 0.87557, 0.044392 ],
[1, 0.88165, 0.046958 ], [1, 0.88771, 0.04977 ],
[1, 0.89376, 0.052828 ], [1, 0.8998, 0.056209 ],
[1, 0.90584, 0.059919 ], [1, 0.91185, 0.063925 ],
[1, 0.91783, 0.068579 ], [1, 0.92384, 0.073948 ],
[1, 0.92981, 0.080899 ], [1, 0.93576, 0.090648 ],
[1, 0.94166, 0.10377 ], [1, 0.94752, 0.12051 ],
[1, 0.9533, 0.14149 ], [1, 0.959, 0.1672 ],
[1, 0.96456, 0.19823 ], [1, 0.96995, 0.23514 ],
[1, 0.9751, 0.2786 ], [1, 0.97992, 0.32883 ],
[1, 0.98432, 0.38571 ], [1, 0.9882, 0.44866 ],
[1, 0.9915, 0.51653 ], [1, 0.99417, 0.58754 ],
[1, 0.99625, 0.65985 ], [1, 0.99778, 0.73194 ],
[1, 0.99885, 0.80259 ], [1, 0.99953, 0.87115 ],
[1, 0.99989, 0.93683 ], [1, 1, 1 ]]
# Bokeh palette
fire = [str('#{0:02x}{1:02x}{2:02x}'.format(int(r*255),int(g*255),int(b*255)))
for r,g,b in fire_colors]
# Matplotlib colormap
try:
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.cm import register_cmap
fire_cmap = LinearSegmentedColormap.from_list("fire", fire_colors, N=len(fire_colors))
fire_r_cmap = LinearSegmentedColormap.from_list("fire_r", list(reversed(fire_colors)), N=len(fire_colors))
register_cmap("fire", cmap=fire_cmap)
register_cmap("fire_r", cmap=fire_r_cmap)
except ImportError:
pass
| 1 | 21,545 | Now the return value has changed (or at least now includes `clip`) it might be worth updating the docstring... | holoviz-holoviews | py |
@@ -411,7 +411,7 @@ func TestRollDPoSConsensus(t *testing.T) {
require.NoError(t, err)
require.NoError(t, sf.Start(ctx))
for j := 0; j < numNodes; j++ {
- ws, err := sf.NewWorkingSet()
+ ws, err := sf.NewWorkingSet(nil)
require.NoError(t, err)
_, err = accountutil.LoadOrCreateAccount(ws, chainRawAddrs[j], big.NewInt(0))
require.NoError(t, err) | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package rolldpos
import (
"encoding/hex"
"fmt"
"math/big"
"net"
"sync"
"testing"
"time"
"github.com/facebookgo/clock"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
cp "github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/endorsement"
"github.com/iotexproject/iotex-core/p2p/node"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_actpool"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/testutil"
)
type addrKeyPair struct {
priKey crypto.PrivateKey
encodedAddr string
}
func TestNewRollDPoS(t *testing.T) {
t.Parallel()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cfg := config.Default
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
t.Run("normal", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetChainManager(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
})
t.Run("mock-clock", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetChainManager(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock.NewMock()).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
_, ok := r.ctx.clock.(*clock.Mock)
assert.True(t, ok)
})
t.Run("root chain API", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetChainManager(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock.NewMock()).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
})
t.Run("missing-dep", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
RegisterProtocol(rp).
Build()
assert.Error(t, err)
assert.Nil(t, r)
})
}
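// makeBlock builds a signed test block at the given height carrying numOfEndosements endorsements;
// when makeInvalidEndorse is true the endorsements vote LOCK instead of COMMIT.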
func makeBlock(t *testing.T, accountIndex, numOfEndosements int, makeInvalidEndorse bool, height int) *block.Block {
unixTime := 1500000000
blkTime := int64(-1)
if height != 9 {
height = 9
blkTime = int64(-7723372030)
}
timeT := time.Unix(blkTime, 0)
rap := block.RunnableActionsBuilder{}
ra := rap.
SetHeight(uint64(height)).
SetTimeStamp(timeT).
Build(identityset.PrivateKey(accountIndex).PublicKey())
blk, err := block.NewBuilder(ra).
SetVersion(1).
SetReceiptRoot(hash.Hash256b([]byte("hello, world!"))).
SetDeltaStateDigest(hash.Hash256b([]byte("world, hello!"))).
SetPrevBlockHash(hash.Hash256b([]byte("hello, block!"))).
SignAndBuild(identityset.PrivateKey(accountIndex))
require.NoError(t, err)
footerForBlk := &block.Footer{}
typesFooter := iotextypes.BlockFooter{}
for i := 0; i < numOfEndosements; i++ {
timeTime := time.Unix(int64(unixTime), 0)
hs := blk.HashBlock()
var consensusVote *ConsensusVote
if makeInvalidEndorse {
consensusVote = NewConsensusVote(hs[:], LOCK)
} else {
consensusVote = NewConsensusVote(hs[:], COMMIT)
}
en, err := endorsement.Endorse(identityset.PrivateKey(i), consensusVote, timeTime)
require.NoError(t, err)
enProto, err := en.Proto()
require.NoError(t, err)
typesFooter.Endorsements = append(typesFooter.Endorsements, enProto)
}
ts, err := ptypes.TimestampProto(time.Unix(int64(unixTime), 0))
require.NoError(t, err)
typesFooter.Timestamp = ts
require.NotNil(t, typesFooter.Timestamp)
err = footerForBlk.ConvertFromBlockFooterPb(&typesFooter)
require.NoError(t, err)
blk.Footer = *footerForBlk
return &blk
}
func TestValidateBlockFooter(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
candidates := make([]string, 5)
for i := 0; i < len(candidates); i++ {
candidates[i] = identityset.Address(i).String()
}
clock := clock.NewMock()
blockHeight := uint64(8)
footer := &block.Footer{}
blockchain := mock_blockchain.NewMockBlockchain(ctrl)
blockchain.EXPECT().BlockFooterByHeight(blockHeight).Return(footer, nil).Times(5)
blockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return([]*state.Candidate{
{Address: candidates[0]},
{Address: candidates[1]},
{Address: candidates[2]},
{Address: candidates[3]},
{Address: candidates[4]},
}, nil).AnyTimes()
sk1 := identityset.PrivateKey(1)
cfg := config.Default
cfg.Genesis.NumDelegates = 4
cfg.Genesis.NumSubEpochs = 1
cfg.Genesis.BlockInterval = 10 * time.Second
cfg.Genesis.Timestamp = int64(1500000000)
blockchain.EXPECT().Genesis().Return(cfg.Genesis).Times(5)
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(1).String()).
SetPriKey(sk1).
SetChainManager(blockchain).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
require.NotNil(t, r)
// all right
blk := makeBlock(t, 1, 4, false, 9)
err = r.ValidateBlockFooter(blk)
require.NoError(t, err)
// Proposer is wrong
blk = makeBlock(t, 0, 4, false, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// Not enough endorsements
blk = makeBlock(t, 1, 2, false, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// round information is wrong
blk = makeBlock(t, 1, 4, false, 0)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// Some endorsement is invalid
blk = makeBlock(t, 1, 4, true, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
}
func TestRollDPoS_Metrics(t *testing.T) {
t.Parallel()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
candidates := make([]string, 5)
for i := 0; i < len(candidates); i++ {
candidates[i] = identityset.Address(i).String()
}
clock := clock.NewMock()
blockHeight := uint64(8)
footer := &block.Footer{}
blockchain := mock_blockchain.NewMockBlockchain(ctrl)
blockchain.EXPECT().TipHeight().Return(blockHeight).Times(1)
blockchain.EXPECT().BlockFooterByHeight(blockHeight).Return(footer, nil).Times(2)
blockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return([]*state.Candidate{
{Address: candidates[0]},
{Address: candidates[1]},
{Address: candidates[2]},
{Address: candidates[3]},
{Address: candidates[4]},
}, nil).AnyTimes()
sk1 := identityset.PrivateKey(1)
cfg := config.Default
cfg.Genesis.NumDelegates = 4
cfg.Genesis.NumSubEpochs = 1
cfg.Genesis.BlockInterval = 10 * time.Second
cfg.Genesis.Timestamp = int64(1500000000)
blockchain.EXPECT().Genesis().Return(cfg.Genesis).Times(2)
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(1).String()).
SetPriKey(sk1).
SetChainManager(blockchain).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
require.NotNil(t, r)
clock.Add(r.ctx.BlockInterval(blockHeight))
require.NoError(t, r.ctx.Start(context.Background()))
r.ctx.round, err = r.ctx.RoundCalc().UpdateRound(r.ctx.round, blockHeight+1, r.ctx.BlockInterval(blockHeight+1), clock.Now(), 2*time.Second)
require.NoError(t, err)
m, err := r.Metrics()
require.NoError(t, err)
assert.Equal(t, uint64(3), m.LatestEpoch)
cp.SortCandidates(candidates, rp.GetEpochHeight(m.LatestEpoch), cp.CryptoSeed)
assert.Equal(t, candidates[:4], m.LatestDelegates)
assert.Equal(t, candidates[1], m.LatestBlockProducer)
}
// E2E RollDPoS tests below
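// directOverlay is an in-process stand-in for the network layer: Broadcast delivers
// consensus messages directly to the registered peer RollDPoS instances.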
type directOverlay struct {
addr net.Addr
peers map[net.Addr]*RollDPoS
}
func (o *directOverlay) Start(_ context.Context) error { return nil }
func (o *directOverlay) Stop(_ context.Context) error { return nil }
func (o *directOverlay) Broadcast(msg proto.Message) error {
// Only broadcast consensus message
if cMsg, ok := msg.(*iotextypes.ConsensusMessage); ok {
for _, r := range o.peers {
if err := r.HandleConsensusMsg(cMsg); err != nil {
return errors.Wrap(err, "error when handling consensus message directly")
}
}
}
return nil
}
func (o *directOverlay) Tell(uint32, net.Addr, proto.Message) error { return nil }
func (o *directOverlay) Self() net.Addr { return o.addr }
func (o *directOverlay) GetPeers() []net.Addr {
addrs := make([]net.Addr, 0, len(o.peers))
for addr := range o.peers {
addrs = append(addrs, addr)
}
return addrs
}
func TestRollDPoSConsensus(t *testing.T) {
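	// newConsensusComponents wires together numNodes in-memory chains, act pools, consensus
	// instances and a fully connected directOverlay used by the end-to-end sub-tests below.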
newConsensusComponents := func(numNodes int) ([]*RollDPoS, []*directOverlay, []blockchain.Blockchain) {
cfg := config.Default
cfg.Consensus.RollDPoS.ConsensusDBPath = ""
cfg.Consensus.RollDPoS.Delay = 300 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptBlockTTL = 800 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptProposalEndorsementTTL = 400 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptLockEndorsementTTL = 400 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.CommitTTL = 400 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.UnmatchedEventTTL = time.Second
cfg.Consensus.RollDPoS.FSM.UnmatchedEventInterval = 10 * time.Millisecond
cfg.Consensus.RollDPoS.ToleratedOvertime = 200 * time.Millisecond
cfg.Genesis.BlockInterval = 2 * time.Second
cfg.Genesis.Blockchain.NumDelegates = uint64(numNodes)
cfg.Genesis.Blockchain.NumSubEpochs = 1
cfg.Genesis.EnableGravityChainVoting = false
chainAddrs := make([]*addrKeyPair, 0, numNodes)
networkAddrs := make([]net.Addr, 0, numNodes)
for i := 0; i < numNodes; i++ {
sk := identityset.PrivateKey(i)
addr := addrKeyPair{
encodedAddr: identityset.Address(i).String(),
priKey: sk,
}
chainAddrs = append(chainAddrs, &addr)
networkAddrs = append(networkAddrs, node.NewTCPNode(fmt.Sprintf("127.0.0.%d:4689", i+1)))
}
chainRawAddrs := make([]string, 0, numNodes)
addressMap := make(map[string]*addrKeyPair)
for _, addr := range chainAddrs {
chainRawAddrs = append(chainRawAddrs, addr.encodedAddr)
addressMap[addr.encodedAddr] = addr
}
cp.SortCandidates(chainRawAddrs, 1, cp.CryptoSeed)
for i, rawAddress := range chainRawAddrs {
chainAddrs[i] = addressMap[rawAddress]
}
candidatesByHeightFunc := func(_ uint64) ([]*state.Candidate, error) {
candidates := make([]*state.Candidate, 0, numNodes)
for _, addr := range chainAddrs {
candidates = append(candidates, &state.Candidate{Address: addr.encodedAddr})
}
return candidates, nil
}
chains := make([]blockchain.Blockchain, 0, numNodes)
p2ps := make([]*directOverlay, 0, numNodes)
cs := make([]*RollDPoS, 0, numNodes)
for i := 0; i < numNodes; i++ {
ctx := context.Background()
cfg.Chain.ProducerPrivKey = hex.EncodeToString(chainAddrs[i].priKey.Bytes())
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
require.NoError(t, err)
require.NoError(t, sf.Start(ctx))
for j := 0; j < numNodes; j++ {
ws, err := sf.NewWorkingSet()
require.NoError(t, err)
_, err = accountutil.LoadOrCreateAccount(ws, chainRawAddrs[j], big.NewInt(0))
require.NoError(t, err)
gasLimit := testutil.TestGasLimit
wsctx := protocol.WithRunActionsCtx(ctx,
protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
Genesis: cfg.Genesis,
})
_, err = ws.RunActions(wsctx, 0, nil)
require.NoError(t, err)
require.NoError(t, sf.Commit(ws))
}
registry := protocol.Registry{}
acc := account.NewProtocol()
require.NoError(t, registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(t, registry.Register(rolldpos.ProtocolID, rp))
chain := blockchain.NewBlockchain(
cfg,
nil,
blockchain.InMemDaoOption(),
blockchain.PrecreatedStateFactoryOption(sf),
blockchain.RegistryOption(®istry),
)
chain.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(chain.Factory().Nonce))
chain.Validator().AddActionValidators(account.NewProtocol())
chains = append(chains, chain)
actPool, err := actpool.NewActPool(chain, cfg.ActPool, actpool.EnableExperimentalActions())
require.NoError(t, err)
p2p := &directOverlay{
addr: networkAddrs[i],
peers: make(map[net.Addr]*RollDPoS),
}
p2ps = append(p2ps, p2p)
consensus, err := NewRollDPoSBuilder().
SetAddr(chainAddrs[i].encodedAddr).
SetPriKey(chainAddrs[i].priKey).
SetConfig(cfg).
SetChainManager(chain).
SetActPool(actPool).
SetBroadcast(p2p.Broadcast).
SetCandidatesByHeightFunc(candidatesByHeightFunc).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
cs = append(cs, consensus)
}
for i := 0; i < numNodes; i++ {
for j := 0; j < numNodes; j++ {
if i != j {
p2ps[i].peers[p2ps[j].addr] = cs[j]
}
}
}
return cs, p2ps, chains
}
t.Run("1-block", func(t *testing.T) {
// TODO: fix and enable the test
t.Skip()
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 10*time.Second, func() (bool, error) {
for _, chain := range chains {
if chain.TipHeight() < 1 {
return false, nil
}
}
return true, nil
}))
})
t.Run("1-epoch", func(t *testing.T) {
if testing.Short() {
t.Skip("Skip the 1-epoch test in short mode.")
}
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 100*time.Second, func() (bool, error) {
for _, chain := range chains {
if chain.TipHeight() < 48 {
return false, nil
}
}
return true, nil
}))
})
t.Run("network-partition-time-rotation", func(t *testing.T) {
// TODO: fix and enable the test
t.Skip()
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 1 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[1].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
cs[idx].ctx.roundCalc.timeBasedRotation = true
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 60*time.Second, func() (bool, error) {
for i, chain := range chains {
if i == 1 {
continue
}
if chain.TipHeight() < 4 {
return false, nil
}
}
return true, nil
}))
})
t.Run("proposer-network-partition-blocking", func(t *testing.T) {
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 1 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[1].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
time.Sleep(5 * time.Second)
for _, chain := range chains {
header, err := chain.BlockHeaderByHeight(1)
assert.Nil(t, header)
assert.Error(t, err)
}
})
t.Run("non-proposer-network-partition-blocking", func(t *testing.T) {
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 0 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[0].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 60*time.Second, func() (bool, error) {
for i, chain := range chains {
if i == 0 {
continue
}
if chain.TipHeight() < 2 {
return false, nil
}
}
return true, nil
}))
for i, chain := range chains {
header, err := chain.BlockHeaderByHeight(1)
if i == 0 {
assert.Nil(t, header)
assert.Error(t, err)
} else {
assert.NotNil(t, header)
assert.NoError(t, err)
}
}
})
}
| 1 | 19,845 | shadow: declaration of "err" shadows declaration at line 410 (from `govet`) | iotexproject-iotex-core | go |
@@ -82,7 +82,13 @@ class Flashmessages extends AbstractHelper
$this->fm->getMessages($ns), $this->fm->getCurrentMessages($ns)
);
foreach (array_unique($messages, SORT_REGULAR) as $msg) {
- $html .= '<div class="' . $this->getClassForNamespace($ns) . '">';
+ $html .= '<div class="' . $this->getClassForNamespace($ns) . '"';
+ if (isset($msg['dataset'])) {
+ foreach ($msg['dataset'] as $attr => $value) {
+ $html .= ' data-' . $attr . '="' . $value . '"';
+ }
+ }
+ $html .= '>';
// Advanced form:
if (is_array($msg)) {
// Use a different translate helper depending on whether | 1 | <?php
/**
* Flash message view helper
*
* PHP version 5
*
* Copyright (C) Villanova University 2010.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package View_Helpers
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development Wiki
*/
namespace VuFind\View\Helper\Root;
use Zend\View\Helper\AbstractHelper, Zend\Mvc\Controller\Plugin\FlashMessenger;
/**
* Flash message view helper
*
* @category VuFind
* @package View_Helpers
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development Wiki
*/
class Flashmessages extends AbstractHelper
{
/**
* Flash messenger controller helper
*
* @var FlashMessenger
*/
protected $fm;
/**
* Constructor
*
* @param FlashMessenger $fm Flash messenger controller helper
*/
public function __construct(FlashMessenger $fm)
{
$this->fm = $fm;
}
/**
* Get the CSS class to correspond with a messenger namespace
*
* @param string $ns Namespace
*
* @return string
*/
protected function getClassForNamespace($ns)
{
return $ns;
}
/**
* Generate flash message <div>'s with appropriate classes based on message type.
*
* @return string $html
*/
public function __invoke()
{
$html = '';
$namespaces = ['error', 'info', 'success'];
foreach ($namespaces as $ns) {
$messages = array_merge(
$this->fm->getMessages($ns), $this->fm->getCurrentMessages($ns)
);
foreach (array_unique($messages, SORT_REGULAR) as $msg) {
$html .= '<div class="' . $this->getClassForNamespace($ns) . '">';
// Advanced form:
if (is_array($msg)) {
// Use a different translate helper depending on whether
// or not we're in HTML mode.
if (!isset($msg['translate']) || $msg['translate']) {
$helper = (isset($msg['html']) && $msg['html'])
? 'translate' : 'transEsc';
} else {
$helper = (isset($msg['html']) && $msg['html'])
? false : 'escapeHtml';
}
$helper = $helper
? $this->getView()->plugin($helper) : false;
$tokens = isset($msg['tokens']) ? $msg['tokens'] : [];
$default = isset($msg['default']) ? $msg['default'] : null;
$html .= $helper
? $helper($msg['msg'], $tokens, $default) : $msg['msg'];
} else {
// Basic default string:
$transEsc = $this->getView()->plugin('transEsc');
$html .= $transEsc($msg);
}
$html .= '</div>';
}
$this->fm->clearMessages($ns);
$this->fm->clearCurrentMessages($ns);
}
return $html;
}
}
| 1 | 24,818 | Should we be escaping $attr and/or $value here? Seems like htmlspecialchars() might be in order to be on the safe side. | vufind-org-vufind | php |
@@ -40,6 +40,7 @@ module Travis
def clone_args
args = "--depth=#{depth}"
args << " --branch=#{branch}" unless data.ref
+ args << " #{quiet_o}"
args
end
| 1 | require 'shellwords'
module Travis
module Build
class Git
class Clone < Struct.new(:sh, :data)
def apply
sh.fold 'git.checkout' do
clone_or_fetch
sh.cd dir
fetch_ref if fetch_ref?
checkout
end
end
private
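        # First run clones the repository; subsequent runs fetch from origin and
        # hard-reset the existing checkout instead.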
def clone_or_fetch
sh.if "! -d #{dir}/.git" do
sh.cmd "git clone #{clone_args} #{data.source_url} #{dir}", assert: true, retry: true
end
sh.else do
sh.cmd "git -C #{dir} fetch origin", assert: true, retry: true
sh.cmd "git -C #{dir} reset --hard", assert: true, timing: false
end
end
def fetch_ref
sh.cmd "git fetch origin +#{data.ref}:", assert: true, retry: true
end
def fetch_ref?
!!data.ref
end
def checkout
sh.cmd "git checkout -qf #{data.pull_request ? 'FETCH_HEAD' : data.commit}", timing: false
end
def clone_args
args = "--depth=#{depth}"
args << " --branch=#{branch}" unless data.ref
args
end
def depth
config[:git][:depth].to_s.shellescape
end
def branch
data.branch.shellescape
end
def dir
data.slug
end
def config
data.config
end
end
end
end
end
| 1 | 12,702 | why not remove the need for the `quiet_o` method and just add it similar to how the `--branch` is added? also, looks like the indenting is a little off | travis-ci-travis-build | rb |
@@ -96,8 +96,9 @@ public class FlowContainer {
private static final String CONF_DIR = "conf";
private static final String JOB_THREAD_COUNT = "flow.num.job.threads";
private static final String DEFAULT_LOG_CHUNK_SIZE = "5MB";
+ private static final String FLOW_EXECUTION_ID = "FLOW_EXECUTION_ID";
+ private static final String VERSION_SET_ID = "VERSION_SET_ID";
private static final int DEFAULT_LOG_NUM_FILES = 4;
- private static final int EXEC_ID_INDEX = 0;
private static final int DEFAULT_JOB_TREAD_COUNT = 10;
private static final boolean DEFAULT_USE_IN_MEMORY_KEYSTORE = false;
// Should validate proxy user | 1 | /*
* Copyright 2020 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.container;
import static azkaban.ServiceProvider.SERVICE_PROVIDER;
import static azkaban.common.ExecJettyServerModule.*;
import static com.google.common.base.Preconditions.*;
import azkaban.AzkabanCommonModule;
import azkaban.Constants;
import azkaban.Constants.PluginManager;
import azkaban.common.ExecJettyServerModule;
import azkaban.execapp.AbstractFlowPreparer;
import azkaban.execapp.AzkabanExecutorServer;
import azkaban.execapp.ExecMetrics;
import azkaban.execapp.FlowRunner;
import azkaban.execapp.event.FlowWatcher;
import azkaban.execapp.event.RemoteFlowWatcher;
import azkaban.execapp.jmx.JmxJobMBeanManager;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutionOptions;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.executor.Status;
import azkaban.jobtype.HadoopJobUtils;
import azkaban.jobtype.HadoopProxy;
import azkaban.jobtype.JobTypeManager;
import azkaban.metrics.CommonMetrics;
import azkaban.metrics.MetricsManager;
import azkaban.project.ProjectLoader;
import azkaban.security.commons.HadoopSecurityManager;
import azkaban.server.AzkabanServer;
import azkaban.spi.AzkabanEventReporter;
import azkaban.storage.ProjectStorageManager;
import azkaban.utils.*;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.FileIOUtils.JobMetaData;
import com.codahale.metrics.MetricRegistry;
import com.google.common.annotations.VisibleForTesting;
import com.google.inject.Guice;
import com.google.inject.Injector;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.KeyStore;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Named;
import javax.inject.Singleton;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is the entrypoint for launching a flow execution in a container.
* It sets up the Azkaban Properties, the DAO, injects all the required classes, sets up the
* execution directory along with the project files, creates the FlowRunner and submits it to
* the executor service for execution.
 * It assumes that there is a certain directory structure containing all the dependencies:
 * 1. The dependencies such as Hadoop, Hive, Pig, and other libraries.
 * 2. The jobtype plugins are expected in "$AZ_HOME/plugins/jobtypes".
 * 3. The FlowContainer creates the project directory named "project" which contains all the
 *    project dependencies. It also serves as the execution directory.
 *
 * The Flow's status is DISPATCHING when FlowContainer is called. Its status is set to
* PREPARING before FlowRunner is created. The rest of the state machine is handled by FlowRunner.
*/
public class FlowContainer {
private static final String JOBTYPE_DIR = "plugins/jobtypes";
private static final String CONF_ARG = "-conf";
private static final String CONF_DIR = "conf";
private static final String JOB_THREAD_COUNT = "flow.num.job.threads";
private static final String DEFAULT_LOG_CHUNK_SIZE = "5MB";
private static final int DEFAULT_LOG_NUM_FILES = 4;
private static final int EXEC_ID_INDEX = 0;
private static final int DEFAULT_JOB_TREAD_COUNT = 10;
private static final boolean DEFAULT_USE_IN_MEMORY_KEYSTORE = false;
// Should validate proxy user
public static final boolean DEFAULT_VALIDATE_PROXY_USER = false;
public static final String JOB_LOG_CHUNK_SIZE = "job.log.chunk.size";
public static final String JOB_LOG_BACKUP_INDEX = "job.log.backup.index";
public static final String PROXY_USER_LOCK_DOWN = "proxy.user.lock.down";
private static final Logger logger = LoggerFactory.getLogger(FlowContainer.class);
private final ExecutorService executorService;
private final ExecutorLoader executorLoader;
private final ProjectLoader projectLoader;
private final JobTypeManager jobTypeManager;
private final AbstractFlowPreparer flowPreparer;
private final Server jettyServer;
private final Context containerContext;
private final AzkabanEventReporter eventReporter;
private final Props azKabanProps;
private Props globalProps;
private final int numJobThreadPerFlow;
private Path execDirPath;
private int port; // Listener port for incoming control & log messages (ContainerServlet)
private FlowRunner flowRunner;
private ExecutableFlow flow; // A flow container is tasked to only run a single flow
// Max chunk size is 20MB.
private final String jobLogChunkSize;
private final int jobLogNumFiles;
// If true, jobs will validate proxy user against a list of valid proxy users.
private final boolean validateProxyUser;
/**
* Constructor of FlowContainer.
   * It sets up the DAO, all the loaders, and the Azkaban KeyStore.
* @param props Azkaban properties.
* @throws IOException
*/
@Inject
@Singleton
public FlowContainer(final Props props,
final ExecutorLoader executorLoader,
final ProjectLoader projectLoader,
@Nullable final AzkabanEventReporter eventReporter,
@Named(EXEC_JETTY_SERVER) final Server jettyServer,
@Named(EXEC_CONTAINER_CONTEXT) final Context context) throws ExecutorManagerException {
// Create Azkaban Props Map
this.azKabanProps = props;
// Setup global props if applicable
final String globalPropsPath = this.azKabanProps.getString("executor.global.properties", null);
if (globalPropsPath != null) {
try {
this.globalProps = new Props(null, globalPropsPath);
} catch (final IOException e) {
logger.error("Error creating global properties :" + globalPropsPath, e);
throw new ExecutorManagerException(e);
}
}
this.executorLoader = executorLoader;
logger.info("executorLoader from guice :" + this.executorLoader);
// project Loader
this.projectLoader = projectLoader;
logger.info("projectLoader from guice : " + this.projectLoader);
// setup executor service
this.executorService = Executors.newSingleThreadExecutor();
// jetty server
this.jettyServer = jettyServer;
this.containerContext = context;
this.eventReporter = eventReporter;
this.jobLogChunkSize = this.azKabanProps.getString(JOB_LOG_CHUNK_SIZE,
DEFAULT_LOG_CHUNK_SIZE);
this.jobLogNumFiles = this.azKabanProps.getInt(JOB_LOG_BACKUP_INDEX, DEFAULT_LOG_NUM_FILES);
this.validateProxyUser = this.azKabanProps.getBoolean(PROXY_USER_LOCK_DOWN,
DEFAULT_VALIDATE_PROXY_USER);
this.jobTypeManager =
new JobTypeManager(
this.azKabanProps.getString(AzkabanExecutorServer.JOBTYPE_PLUGIN_DIR,
PluginManager.JOBTYPE_DEFAULTDIR),
this.globalProps, getClass().getClassLoader());
this.numJobThreadPerFlow = props.getInt(JOB_THREAD_COUNT, DEFAULT_JOB_TREAD_COUNT);
if (this.azKabanProps.getBoolean(Constants.USE_IN_MEMORY_KEYSTORE,
DEFAULT_USE_IN_MEMORY_KEYSTORE)) {
// Setting up the in-memory KeyStore for all the job executions in the flow.
setupKeyStore();
}
// Create a flow preparer
this.flowPreparer = new ContainerizedFlowPreparer(
SERVICE_PROVIDER.getInstance(ProjectStorageManager.class),
SERVICE_PROVIDER.getInstance(DependencyTransferManager.class));
}
/**
* The entry point of FlowContainer. Validates the input arguments and submits the flow for
   * execution. It is assumed that the AZ_HOME environment variable is set. If it is not set,
   * it is explicitly set to the present working directory.
* @param args Takes the execution id and Project zip file path as inputs.
* @throws IOException
* @throws ExecutorManagerException
*/
public static void main(final String[] args) throws ExecutorManagerException {
// Redirect all std out and err messages into slf4j
StdOutErrRedirect.redirectOutAndErrToLog();
// Get all the arguments
final String execIdStr = args[EXEC_ID_INDEX];
// Process Execution ID.
int execId = -1;
try {
execId = Integer.parseInt(execIdStr);
} catch (NumberFormatException ne) {
logger.error("Execution ID passed in argument is invalid {}", execIdStr);
throw new ExecutorManagerException(ne);
}
logger.info("Execution ID : " + execId);
final Path currentDir = ContainerizedFlowPreparer.getCurrentDir();
// Set Azkaban props
final Path jobtypePluginPath = Paths.get(currentDir.toString(), JOBTYPE_DIR);
Props azkabanProps = setAzkabanProps(jobtypePluginPath);
// Setup Injector
setInjector(azkabanProps);
// Constructor
final FlowContainer flowContainer = SERVICE_PROVIDER.getInstance(FlowContainer.class);
flowContainer.start();
launchCtrlMsgListener(flowContainer);
    // TODO : Revisit this logic with full implementation for JMXBeanManager and other callback mechanisms
JmxJobMBeanManager.getInstance().initialize(azkabanProps);
// execute the flow
flowContainer.submitFlow(execId, currentDir);
}
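  // Illustrative launch sketch (not from the original source): the container is
  // expected to be started with the execution id as the first program argument,
  // with jobtype plugins under ./plugins/jobtypes and azkaban.properties under
  // ./conf relative to the working directory. The class path and execution id
  // below are hypothetical placeholders.
  //
  //   java -cp "lib/*" azkaban.container.FlowContainer 12345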
/**
* Set Azkaban Props
* @param jobtypePluginPath Path where all the jobtype plugins are mounted.
* @return Populated Azkaban properties.
*/
private static Props setAzkabanProps(final Path jobtypePluginPath) {
final Map<String, String> propsMap = new HashMap<>();
propsMap.put(AzkabanExecutorServer.JOBTYPE_PLUGIN_DIR,
jobtypePluginPath.toString());
// Setup the azkaban.properties here.
final String[] args = {CONF_ARG, CONF_DIR};
final Props props = AzkabanServer.loadProps(args);
return new Props(props, propsMap);
}
@VisibleForTesting
static void setInjector(final Props azkabanProps){
// Inject AzkabanCommonModule
final Injector injector = Guice.createInjector(
new AzkabanCommonModule(azkabanProps),
new ExecJettyServerModule()
);
SERVICE_PROVIDER.setInjector(injector);
}
/**
* Submit flow
* Creates and submits the FlowRunner.
* @param execId Execution Id of the flow.
* @throws ExecutorManagerException
*/
@VisibleForTesting
void submitFlow(final int execId, final Path currentDir)
throws ExecutorManagerException {
final ExecutableFlow flow = this.executorLoader.fetchExecutableFlow(execId);
if (flow == null) {
logger.error("Error loading flow with execution Id " + execId);
throw new ExecutorManagerException("Error loading flow for exec: " + execId +
". Terminating flow container launch");
}
// Update the status of the flow from DISPATCHING to PREPARING
this.flow = flow;
flow.setStatus(Status.PREPARING);
this.executorLoader.updateExecutableFlow(flow);
this.flowRunner = createFlowRunner(flow);
submitFlowRunner(this.flowRunner);
}
/**
* Create Flow Runner and setup the flow execution directory with project dependencies.
* @param flow Executable flow object.
* @return FlowRunner object.
* @throws ExecutorManagerException
*/
private FlowRunner createFlowRunner(final ExecutableFlow flow) throws ExecutorManagerException {
// Prepare the flow with project dependencies.
this.flowPreparer.setup(flow);
// Setup flow watcher
FlowWatcher watcher = null;
final ExecutionOptions options = flow.getExecutionOptions();
if (options.getPipelineExecutionId() != null) {
final int pipelinedExecId = options.getPipelineExecutionId();
watcher = new RemoteFlowWatcher(pipelinedExecId, this.executorLoader);
}
// TODO : figure out the metrics
// Create the FlowRunner
final MetricsManager metricsManager = new MetricsManager(new MetricRegistry());
final CommonMetrics commonMetrics = new CommonMetrics(metricsManager);
final ExecMetrics execMetrics = new ExecMetrics(metricsManager);
final AzkabanEventReporter eventReporter =
SERVICE_PROVIDER.getInstance(AzkabanEventReporter.class);
final FlowRunner flowRunner = new FlowRunner(flow, this.executorLoader,
this.projectLoader, this.jobTypeManager, this.azKabanProps, eventReporter,
null, commonMetrics, execMetrics);
flowRunner.setFlowWatcher(watcher)
.setJobLogSettings(this.jobLogChunkSize, this.jobLogNumFiles)
.setValidateProxyUser(this.validateProxyUser)
.setNumJobThreads(this.numJobThreadPerFlow);
return flowRunner;
}
/**
* Submits the flow to executorService for execution.
* @param flowRunner The FlowRunner object.
*/
private void submitFlowRunner(final FlowRunner flowRunner) throws ExecutorManagerException {
// set running flow, put it in DB
logger.info("Submitting flow with execution Id " + flowRunner.getExecutionId());
final Future<?> flowFuture = this.executorService.submit(flowRunner);
try {
flowFuture.get();
} catch (final InterruptedException | ExecutionException e) {
logger.error(ExceptionUtils.getStackTrace(e));
throw new ExecutorManagerException(e);
}
}
/**
* Setup in-memory keystore to be reused for all the job executions in the flow.
* @throws IOException
*/
private void setupKeyStore() throws ExecutorManagerException {
// Fetch keyStore props and use it to get the KeyStore, put it in JobTypeManager
Props commonPluginLoadProps = this.jobTypeManager.getCommonPluginLoadProps();
if (commonPluginLoadProps != null) {
// Load HadoopSecurityManager
HadoopSecurityManager hadoopSecurityManager = null;
try {
final String hadoopSecurityClassName =
commonPluginLoadProps.getString(HadoopJobUtils.HADOOP_SECURITY_MANAGER_CLASS_PARAM);
final Class<?> hadoopSecurityManagerClass =
HadoopProxy.class.getClassLoader().loadClass(hadoopSecurityClassName);
logger.info("Loading hadoop security manager " + hadoopSecurityManagerClass.getName());
hadoopSecurityManager = (HadoopSecurityManager)
Utils.callConstructor(hadoopSecurityManagerClass, commonPluginLoadProps);
} catch (final Exception e) {
logger.error("Could not instantiate Hadoop Security Manager ", e);
throw new RuntimeException("Failed to get hadoop security manager!"
+ e.getCause(), e);
}
final KeyStore keyStore = hadoopSecurityManager.getKeyStore(commonPluginLoadProps);
if (keyStore == null) {
logger.error("Failed to Prefetch KeyStore");
throw new ExecutorManagerException("Failed to Prefetch KeyStore");
}
logger.info("In-memory Keystore is setup, delete the cert file");
// Delete the cert file from disk as the KeyStore is already cached above.
final File certFile = new File(this.azKabanProps.get(
Constants.ConfigurationKeys.CSR_KEYSTORE_LOCATION));
if (certFile.delete()) {
logger.info("Successfully deleted the cert file");
} else {
logger.error("Failed to delete the cert file");
throw new ExecutorManagerException("Failed to delete the cert file");
}
}
}
@VisibleForTesting
void start() {
this.containerContext.setAttribute(Constants.AZKABAN_CONTAINER_CONTEXT_KEY, this);
}
public void cancelFlow(final int execId, final String user)
throws ExecutorManagerException {
logger.info("Cancel Flow called");
if (this.flowRunner == null) {
logger.warn("Attempt to cancel flow execId: {} before flow got a chance to start.",
execId);
throw new ExecutorManagerException("Flow has not launched yet.");
}
if (Status.isStatusFinished(this.flowRunner.getExecutableFlow().getStatus())) {
logger.warn("Found a finished execution in the list of running flows: " + execId);
throw new ExecutorManagerException("Execution is already finished.");
}
this.flowRunner.kill(user);
}
/**
* Return accumulated flow logs with the specified length from the flow container starting from the given byte offset.
* @param execId
* @param startByte
* @param length
* @return
* @throws ExecutorManagerException
*/
public LogData readFlowLogs(final int execId, final int startByte, final int length)
throws ExecutorManagerException {
logger.info("readFlowLogs called");
if (this.flowRunner == null) {
logger.warn("Attempt to read flow logs before flow execId: {} got a chance to start",
execId);
throw new ExecutorManagerException("The flow has not launched yet!");
}
final File dir = flowRunner.getExecutionDir();
if (dir == null || !dir.exists()) {
logger.warn("Error reading file. Execution directory does not exist for flow execId: {}", execId);
throw new ExecutorManagerException("Error reading file. Execution directory does not exist");
}
try {
final File logFile = flowRunner.getFlowLogFile();
if (logFile != null && logFile.exists()) {
return FileIOUtils.readUtf8File(logFile, startByte, length);
} else {
logger.warn("Flow log file does not exist for flow execId: {}", execId);
throw new ExecutorManagerException("Flow log file does not exist.");
}
} catch (final IOException e) {
logger.warn("IOException while trying to read flow log file for flow execId: {}",
execId);
throw new ExecutorManagerException(e);
}
}
/**
* Return accumulated job logs for a specific job starting with the provided byte offset.
* @param execId
* @param jobId
* @param attempt
* @param startByte
* @param length
* @return
* @throws ExecutorManagerException
*/
public LogData readJobLogs(final int execId, final String jobId, final int attempt,
final int startByte, final int length) throws ExecutorManagerException {
logger.info("readJobLogs called");
if (this.flowRunner == null) {
logger.warn("Attempt to read job logs before flow got a chance to start. " +
"Flow execId: {}, jobId: {}", execId, jobId);
throw new ExecutorManagerException("The flow has not launched yet!");
}
final File dir = flowRunner.getExecutionDir();
if (dir == null || !dir.exists()) {
logger.warn("Error reading jobLogs. Execution dir does not exist. execId: {}, jobId: {}",
execId, jobId);
throw new ExecutorManagerException(
"Error reading file. Execution directory does not exist.");
}
try {
final File logFile = flowRunner.getJobLogFile(jobId, attempt);
if (logFile != null && logFile.exists()) {
return FileIOUtils.readUtf8File(logFile, startByte, length);
} else {
logger.warn("Job log file does not exist. Flow execId: {}, jobId: {}",
execId, jobId);
throw new ExecutorManagerException("Job log file does not exist.");
}
} catch (final IOException e) {
logger.warn("IOException while trying to read Job logs. execId: {}, jobId: {}",
execId, jobId);
throw new ExecutorManagerException(e);
}
}
/**
*
* @param execId
* @param jobId
* @param attempt
* @param startByte
* @param length
* @return
* @throws ExecutorManagerException
*/
public JobMetaData readJobMetaData(final int execId, final String jobId,
final int attempt, final int startByte, final int length) throws ExecutorManagerException {
logger.info("readJobMetaData called");
if (this.flowRunner == null) {
logger.warn("Metadata cannot be read as flow has not started. execId: {}, jobId: {}",
execId, jobId);
throw new ExecutorManagerException("The flow has not launched yet.");
}
final File dir = flowRunner.getExecutionDir();
if (dir == null || !dir.exists()) {
logger.warn("Execution directory does not exist. execId: {}, jobId: {}",
execId, jobId);
throw new ExecutorManagerException(
"Error reading file. Execution directory does not exist.");
}
try {
final File metaDataFile = flowRunner.getJobMetaDataFile(jobId, attempt);
if (metaDataFile != null && metaDataFile.exists()) {
return FileIOUtils.readUtf8MetaDataFile(metaDataFile, startByte, length);
} else {
logger.warn("Job metadata file does not exist. execId: {}, jobId: {}",
execId, jobId);
throw new ExecutorManagerException("Job metadata file does not exist.");
}
} catch (final IOException e) {
logger.warn("IOException while trying to read metadata file. execId: {}, jobId: {}",
execId, jobId);
throw new ExecutorManagerException(e);
}
}
@VisibleForTesting
static void launchCtrlMsgListener(FlowContainer flowContainer) {
try {
flowContainer.jettyServer.start();
} catch (final Exception e) {
logger.error(e.getMessage());
}
// TODO Add hook for JobCallback
final Connector[] connectors = flowContainer.jettyServer.getConnectors();
checkState(connectors.length >= 1, "Server must have at least 1 connector");
// The first connector is created upon initializing the server. That's the one that has the port.
flowContainer.port = connectors[0].getLocalPort();
logger.info("Listening on port {} for control messages.", flowContainer.port);
}
}
| 1 | 21,191 | These variables are already created in constants. Please reuse those. | azkaban-azkaban | java |
@@ -156,6 +156,10 @@ class Key(object):
if self.resp == None:
self.mode = 'r'
+ if self.version_id:
+ query_args = query_args or []
+ query_args.append('versionId=%s' % self.version_id)
+
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers, | 1 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import mimetypes
import os
import re
import rfc822
import StringIO
import base64
import urllib
import boto.utils
from boto.exception import BotoClientError
from boto.provider import Provider
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5
try:
from hashlib import md5
except ImportError:
from md5 import md5
class Key(object):
DefaultContentType = 'application/octet-stream'
BufferSize = 8192
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.filename = None
self.etag = None
self.last_modified = None
self.owner = None
self.storage_class = 'STANDARD'
self.md5 = None
self.base64md5 = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
def __repr__(self):
if self.bucket:
return '<Key: %s,%s>' % (self.bucket.name, self.name)
else:
return '<Key: None,%s>' % self.name
def __getattr__(self, name):
if name == 'key':
return self.name
else:
raise AttributeError
def __setattr__(self, name, value):
if name == 'key':
self.__dict__['name'] = value
else:
self.__dict__[name] = value
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket:
if self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
import binascii
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = base64.encodestring(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
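    # Illustrative usage sketch (not part of the original module): if the hex
    # digest of the data is already known, the (hexdigest, base64) tuple this
    # returns can be passed as the ``md5`` argument of the upload methods so the
    # file is not hashed again. The digest and filename below are hypothetical.
    #
    #   md5_tuple = key.get_md5_from_hexdigest('5eb63bbbe01eeed093cb22bb8f5acdc3')
    #   key.set_contents_from_filename('/tmp/data.bin', md5=md5_tuple)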
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def open_read(self, headers=None, query_args=None,
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string (ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
if self.resp == None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name,value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() == 'etag':
self.etag = value
elif name.lower() == 'content-type':
self.content_type = value
elif name.lower() == 'content-encoding':
self.content_encoding = value
elif name.lower() == 'last-modified':
self.last_modified = value
elif name.lower() == 'cache-control':
self.cache_control = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self):
if self.resp:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
        all metadata and ACL info but changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key
will be used.
"""
if new_storage_class == 'STANDARD':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=False, preserve_acl=True)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(self.bucket.name, self.name,
reduced_redundancy=True, preserve_acl=True)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
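    # Illustrative usage sketch (bucket and key names are hypothetical):
    #
    #   key = bucket.get_key('logs/2011-01-01.gz')
    #   key.change_storage_class('REDUCED_REDUNDANCY')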
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key.
If metadata is supplied, it will replace the
metadata of the source key being copied.
If no metadata is supplied, the source key's
metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the storage
class of the new Key to be
REDUCED_REDUNDANCY regardless of the
storage class of the key being copied.
The Reduced Redundancy Storage (RRS)
feature of S3, provides lower
redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key
will be copied to the destination
key. If False, the destination key
will have the default ACL.
Note that preserving the ACL in the
new key object will require two
additional API calls to S3, one to
retrieve the current ACL and one to
set that ACL on the new object. If
you don't care about the ACL, a value
of False will be significantly more
efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key)
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value.encode('utf-8')
elif name == 'ETag':
self.etag = value
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name))
def delete(self):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
self.metadata[name] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket != None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket != None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket != None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket != None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds
:type method: string
:param method: The method to use for retrieving the file
(default is GET)
:type headers: dict
:param headers: Any headers to pass along in the request
:type query_auth: bool
        :param query_auth: If True (the default), sign the URL with query
                           string authentication parameters; if False, return
                           an unsigned URL.
:rtype: string
:return: The URL to access the key
"""
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers)
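    # Illustrative usage sketch (expiry value is arbitrary):
    #
    #   signed_url = key.generate_url(3600)                  # valid for one hour
    #   plain_url = key.generate_url(0, query_auth=False)    # unsigned URL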
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity
of the callback by defining the maximum number of
times the callback will be called during the file
transfer. Providing a negative integer will cause
your callback to be called with each buffer read.
"""
provider = self.bucket.connection.provider
def sender(http_conn, method, path, data, headers):
http_conn.putrequest(method, path)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
if chunked_transfer:
# MD5 for the stream has to be calculated on the fly, as
# we don't know the size of the stream before hand.
m = md5()
else:
fp.seek(0)
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 3 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 3:
http_conn.set_debuglevel(0)
if cb:
if chunked_transfer:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred.
cb_count = (1024 * 1024)/self.BufferSize
self.size = 0
elif num_cb > 2:
cb_count = self.size / self.BufferSize / (num_cb-2)
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = total_bytes = 0
cb(total_bytes, self.size)
l = fp.read(self.BufferSize)
while len(l) > 0:
if chunked_transfer:
http_conn.send('%x;\r\n' % len(l))
http_conn.send(l)
http_conn.send('\r\n')
else:
http_conn.send(l)
if cb:
total_bytes += len(l)
i += 1
if i == cb_count or cb_count == -1:
cb(total_bytes, self.size)
i = 0
if chunked_transfer:
m.update(l)
l = fp.read(self.BufferSize)
if chunked_transfer:
http_conn.send('0\r\n')
http_conn.send('\r\n')
if cb:
self.size = total_bytes
# Get the md5 which is calculated on the fly.
self.md5 = m.hexdigest()
else:
fp.seek(0)
if cb:
cb(total_bytes, self.size)
response = http_conn.getresponse()
body = response.read()
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
if ((response.status == 500 or response.status == 503 or
response.getheader('location')) and not chunked_transfer):
# we'll try again.
return response
elif response.status >= 200 and response.status <= 299:
self.etag = response.getheader('etag')
if self.etag != '"%s"' % self.md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5')
return response
else:
raise provider.storage_response_error(
response.status, response.reason, body)
if not headers:
headers = {}
else:
headers = headers.copy()
headers['User-Agent'] = UserAgent
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if self.storage_class != 'STANDARD':
headers[provider.storage_class_header] = self.storage_class
if headers.has_key('Content-Encoding'):
self.content_encoding = headers['Content-Encoding']
if headers.has_key('Content-Type'):
self.content_type = headers['Content-Type']
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type == None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if not chunked_transfer:
headers['Content-Length'] = str(self.size)
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers,
sender=sender,
query_args=query_args)
self.handle_version_headers(resp, force=True)
def compute_md5(self, fp):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file pointer
will be reset to the beginning of the file before the
method returns.
:rtype: tuple
:return: A tuple containing the hex digest version of the MD5 hash
as the first element and the base64 encoded version of the
plain digest as the second element.
"""
tup = compute_md5(fp)
# Returned values are MD5 hash, base64 encoded MD5 hash, and file size.
# The internal implementation of compute_md5() needs to return the
# file size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = tup[2]
return tup[0:2]
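    # Illustrative usage sketch (file path is hypothetical): hash the data once
    # and reuse the result so set_contents_from_file() does not hash it again.
    #
    #   fp = open('/tmp/data.bin', 'rb')
    #   md5 = key.compute_md5(fp)
    #   key.set_contents_from_file(fp, md5=md5)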
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
        This has the implication that we can't specify the Content-Length and
Content-MD5 in the header. So for huge uploads, the delay in calculating
MD5 is avoided but with a penalty of inability to verify the integrity
of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
# Set the Transfer Encoding for Streams.
headers['Transfer-Encoding'] = 'chunked'
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket != None:
if not replace:
k = self.bucket.lookup(self.name)
if k:
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method
will first check to see if an object exists in the
bucket with the same key. If it does, it won't
overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity
of the callback by defining the maximum number of
times the callback will be called during the
file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of
the file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
"""
provider = self.bucket.connection.provider
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
# TODO - What if provider doesn't support reduced reduncancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket != None:
if not md5:
md5 = self.compute_md5(fp)
else:
# even if md5 is provided, still need to set size of content
fp.seek(0, 2)
self.size = fp.tell()
fp.seek(0)
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name == None:
self.name = self.md5
if not replace:
k = self.bucket.lookup(self.name)
if k:
return
self.send_file(fp, headers, cb, num_cb, query_args)
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
"""
fp = open(filename, 'rb')
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
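    # Illustrative usage sketch (bucket, key, and file names are hypothetical):
    #
    #   key = bucket.new_key('backups/db.sql.gz')
    #   key.set_contents_from_filename('/tmp/db.sql.gz', reduced_redundancy=True)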
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
of the file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be
REDUCED_REDUNDANCY. The Reduced Redundancy
Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and
will be stored in an encrypted form while
at rest in S3.
"""
if isinstance(s, unicode):
s = s.encode("utf-8")
fp = StringIO.StringIO(s)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
        :type headers: dict
        :param headers: Headers to send when retrieving the file
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
if cb:
if num_cb > 2:
cb_count = self.size / self.BufferSize / (num_cb-2)
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = total_bytes = 0
cb(total_bytes, self.size)
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = []
if torrent:
query_args.append('torrent')
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (key, urllib.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
for bytes in self:
fp.write(bytes)
if cb:
total_bytes += len(bytes)
i += 1
if i == cb_count or cb_count == -1:
cb(total_bytes, self.size)
i = 0
if cb:
cb(total_bytes, self.size)
self.close()
self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
Get a torrent file (see to get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: File -like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
if self.bucket != None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
fp = open(filename, 'wb')
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
fp.close()
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified != None:
try:
modified_tuple = rfc822.parsedate_tz(self.last_modified)
modified_stamp = int(rfc822.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception: pass
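    # Illustrative usage sketch (key and file names are hypothetical):
    #
    #   key = bucket.get_key('backups/db.sql.gz')
    #   key.get_contents_to_filename('/tmp/db.sql.gz')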
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
:rtype: string
:returns: The contents of the file as a string
"""
fp = StringIO.StringIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
return fp.getvalue()
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
                              account you are granting the permission to.
:type recursive: boolean
:param recursive: A boolean value to controls whether the command
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
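    # Illustrative usage sketch (the email address is a placeholder):
    #
    #   key.add_email_grant('READ', 'someone@example.com')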
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
                        account you are granting the permission to.
:type display_name: string
:param display_name: An option string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl()
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
| 1 | 8,036 | query_args is a string, not a list. Therefore you cannot call append to it. Also, line 986-989 of key.py already have code that is appending the versionId query parameter. | boto-boto | py |
@@ -548,12 +548,12 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Fil
if info != nil {
err := o.decodeMetaData(info)
if err != nil {
- return nil, err
+ return o, err
}
} else {
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err != nil {
- return nil, err
+ return o, err
}
}
return o, nil | 1 | // Package b2 provides an interface to the Backblaze B2 object storage system
package b2
// FIXME should we remove sha1 checks from here as rclone now supports
// checking SHA1s?
import (
"bufio"
"bytes"
"context"
"crypto/sha1"
"fmt"
gohash "hash"
"io"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
const (
defaultEndpoint = "https://api.backblazeb2.com"
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
timeKey = "src_last_modified_millis"
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
retryAfterHeader = "Retry-After"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
)
// Globals
var (
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "b2",
Description: "Backblaze B2",
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID",
Required: true,
}, {
Name: "key",
Help: "Application Key",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\nLeave blank normally.",
Advanced: true,
}, {
Name: "test_mode",
Help: `A flag string for X-Bz-Test-Mode header for debugging.
This is for debugging purposes only. Setting it to one of the strings
below will cause b2 to return specific errors:
* "fail_some_uploads"
* "expire_some_account_authorization_tokens"
* "force_cap_exceeded"
These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
Default: "",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "versions",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: "Permanently delete files on remote removal, otherwise hide files.",
Default: false,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory.
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might be a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files`,
Default: false,
Advanced: true,
}, {
Name: "download_url",
Help: `Custom endpoint for downloads.
This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
This is probably only useful for a public bucket.
Leave blank if you want to use the endpoint provided by Backblaze.`,
Advanced: true,
}, {
Name: "download_auth_duration",
Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.`,
Default: fs.Duration(7 * 24 * time.Hour),
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
}
// Fs represents a remote b2 server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
srv *rest.Client // the connection to the b2 server
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketIDMutex sync.Mutex // mutex to protect _bucketID
_bucketID string // the ID of the bucket we are working on
bucketTypeMutex sync.Mutex // mutex to protect _bucketType
_bucketType string // the Type of the bucket we are working on
info api.AuthorizeAccountResponse // result of authorize call
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadURLResponse // result of get upload URL calls
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
bufferTokens chan []byte // control concurrency of multipart uploads
}
// Object describes a b2 object
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
id string // b2 id of the file
modTime time.Time // The modified time of the object if known
sha1 string // SHA-1 hash if known
size int64 // Size of the object
mimeType string // Content-Type of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("B2 bucket %s", f.bucket)
}
return fmt.Sprintf("B2 bucket %s path %s", f.bucket, f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Pattern to match a b2 path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// parsePath parses a b2 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in b2 path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable
504, // Gateway Time-out
}
// shouldRetryNoReauth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
// For 429 or 503 errors look at the Retry-After: header and
// set the retry appropriately, starting with a minimum of 1
// second if it isn't set.
if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
var retryAfter = 1
retryAfterString := resp.Header.Get(retryAfterHeader)
if retryAfterString != "" {
var err error
retryAfter, err = strconv.Atoi(retryAfterString)
if err != nil {
fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
}
}
return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil && resp.StatusCode == 401 {
fs.Debugf(f, "Unauthorized: %v", err)
// Reauth
authErr := f.authorizeAccount()
if authErr != nil {
err = authErr
}
return true, err
}
return f.shouldRetryNoReauth(resp, err)
}
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.Code == "" {
errResponse.Code = "unknown"
}
if errResponse.Status == 0 {
errResponse.Status = resp.StatusCode
}
if errResponse.Message == "" {
errResponse.Message = "Unknown " + resp.Status
}
return errResponse
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
f.fillBufferTokens() // reset the buffer tokens
}
return
}
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(&f.opt, cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "b2: upload cutoff")
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "b2: chunk size")
}
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
if opt.Account == "" {
return nil, errors.New("account not found")
}
if opt.Key == "" {
return nil, errors.New("key not found")
}
if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint
}
f := &Fs{
name: name,
opt: *opt,
bucket: bucket,
root: directory,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
// Set the test flag if required
if opt.TestMode != "" {
testMode := strings.TrimSpace(opt.TestMode)
f.srv.SetHeader(testModeHeader, testMode)
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
}
f.fillBufferTokens()
err = f.authorizeAccount()
if err != nil {
return nil, errors.Wrap(err, "failed to authorize account")
}
// If this is a key limited to a single bucket, it must exist already
if f.bucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.info.Allowed.BucketName
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.bucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.markBucketOK()
f.setBucketID(f.info.Allowed.BucketID)
}
if f.root != "" {
f.root += "/"
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f.root
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
f.root = oldRoot
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// authorizeAccount gets the API endpoint and auth token. Can be used
// for reauthentication too.
func (f *Fs) authorizeAccount() error {
f.authMu.Lock()
defer f.authMu.Unlock()
opts := rest.Opts{
Method: "GET",
Path: "/b2api/v1/b2_authorize_account",
RootURL: f.opt.Endpoint,
UserName: f.opt.Account,
Password: f.opt.Key,
ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, nil, &f.info)
return f.shouldRetryNoReauth(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to authenticate")
}
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil
}
// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
for _, capability := range f.info.Allowed.Capabilities {
if capability == permission {
return true
}
}
return false
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (f *Fs) getUploadURL() (upload *api.GetUploadURLResponse, err error) {
f.uploadMu.Lock()
defer f.uploadMu.Unlock()
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
if len(f.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_url",
}
var request = api.GetUploadURLRequest{
BucketID: bucketID,
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &upload)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
}
} else {
upload, f.uploads = f.uploads[0], f.uploads[1:]
}
return upload, nil
}
// returnUploadURL returns the UploadURL to the cache
func (f *Fs) returnUploadURL(upload *api.GetUploadURLResponse) {
if upload == nil {
return
}
f.uploadMu.Lock()
f.uploads = append(f.uploads, upload)
f.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (f *Fs) clearUploadURL() {
f.uploadMu.Lock()
f.uploads = nil
f.uploadMu.Unlock()
}
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens <- nil
}
}
// getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte {
buf := <-f.bufferTokens
if buf == nil {
buf = make([]byte, f.opt.ChunkSize)
}
// fs.Debugf(f, "Getting upload block %p", buf)
return buf
}
// putUploadBlock returns a block to the pool of size chunkSize
func (f *Fs) putUploadBlock(buf []byte) {
buf = buf[:cap(buf)]
if len(buf) != int(f.opt.ChunkSize) {
panic("bad blocksize returned to pool")
}
// fs.Debugf(f, "Returning upload block %p", buf)
f.bufferTokens <- buf
}
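// Illustrative usage sketch, added for this edit and not part of the original
// rclone source: the token channel above acts as a counting semaphore that
// also recycles chunk buffers, so at most fs.Config.Transfers buffers of
// ChunkSize bytes are checked out at once and each caller must return its
// buffer when done.
func (f *Fs) exampleUploadBlockUsage(fill func(buf []byte)) {
	buf := f.getUploadBlock()   // blocks until a buffer token is available
	defer f.putUploadBlock(buf) // always hand the buffer back for reuse
	fill(buf)                   // use the buffer for one chunk's worth of data
}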
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
err := o.decodeMetaData(info)
if err != nil {
return nil, err
}
} else {
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// listFn is called from list to handle an object
type listFn func(remote string, object *api.File, isDirectory bool) error
// errEndList is a sentinel used to end the list iteration now.
// listFn should return it to end the iteration with no errors.
var errEndList = errors.New("end list")
// list lists the objects into the function supplied from
// the bucket and root supplied
//
// dir is the starting directory, "" for root
//
// level is the depth to search to
//
// If prefix is set then startFileName is used as a prefix which all
// files must have
//
// If limit is > 0 then it limits to that many files (must be less
// than 1000)
//
// If hidden is set then it will list the hidden (deleted) files too.
func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
root := f.root
if dir != "" {
root += dir + "/"
}
delimiter := ""
if !recurse {
delimiter = "/"
}
bucketID, err := f.getBucketID()
if err != nil {
return err
}
chunkSize := 1000
if limit > 0 {
chunkSize = limit
}
var request = api.ListFileNamesRequest{
BucketID: bucketID,
MaxFileCount: chunkSize,
Prefix: root,
Delimiter: delimiter,
}
prefix = root + prefix
if prefix != "" {
request.StartFileName = prefix
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_list_file_names",
}
if hidden {
opts.Path = "/b2_list_file_versions"
}
for {
var response api.ListFileNamesResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return err
}
for i := range response.Files {
file := &response.Files[i]
// Finish if file name no longer has prefix
if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
return nil
}
if !strings.HasPrefix(file.Name, f.root) {
fs.Debugf(f, "Odd name received %q", file.Name)
continue
}
remote := file.Name[len(f.root):]
// Check for directory
isDirectory := strings.HasSuffix(remote, "/")
if isDirectory {
remote = remote[:len(remote)-1]
}
// Send object
err = fn(remote, file, isDirectory)
if err != nil {
if err == errEndList {
return nil
}
return err
}
}
// end if no NextFileName
if response.NextFileName == nil {
break
}
request.StartFileName = *response.NextFileName
if response.NextFileID != nil {
request.StartFileID = *response.NextFileID
}
}
return nil
}
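// Illustrative sketch, not part of the original file, of the listFn contract
// described above: the callback returns nil to keep iterating, errEndList to
// stop early without error, or any other error to abort the listing. Here it
// simply counts the non-directory entries under dir.
func (f *Fs) exampleCountObjects(ctx context.Context, dir string) (n int, err error) {
	err = f.list(ctx, dir, true, "", 0, false, func(remote string, object *api.File, isDirectory bool) error {
		if !isDirectory {
			n++
		}
		return nil
	})
	return n, err
}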
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{})
return d, nil
}
if remote == *last {
remote = object.UploadTimestamp.AddVersion(remote)
} else {
*last = remote
}
// hide objects represent deleted files which we don't list
if object.Action == "hide" {
return nil, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
last := ""
err = f.list(ctx, dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return entries, nil
}
// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
err = f.listBucketsToFn(func(bucket *api.Bucket) error {
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
return nil
})
if err != nil {
return nil, err
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
last := ""
err = f.list(ctx, dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
// listBucketFn is called from listBucketsToFn to handle a bucket
type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(fn listBucketFn) error {
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID,
}
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",
Path: "/b2_list_buckets",
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &account, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return err
}
for i := range response.Buckets {
err = fn(&response.Buckets[i])
if err != nil {
return err
}
}
return nil
}
// getbucketType finds the bucketType for the current bucket name
// can be one of allPublic, allPrivate, or snapshot
func (f *Fs) getbucketType() (bucketType string, err error) {
f.bucketTypeMutex.Lock()
defer f.bucketTypeMutex.Unlock()
if f._bucketType != "" {
return f._bucketType, nil
}
err = f.listBucketsToFn(func(bucket *api.Bucket) error {
if bucket.Name == f.bucket {
bucketType = bucket.Type
}
return nil
})
if bucketType == "" {
err = fs.ErrorDirNotFound
}
f._bucketType = bucketType
return bucketType, err
}
// setBucketType sets the Type for the current bucket name
func (f *Fs) setBucketType(Type string) {
f.bucketTypeMutex.Lock()
f._bucketType = Type
f.bucketTypeMutex.Unlock()
}
// clearBucketType clears the Type for the current bucket name
func (f *Fs) clearBucketType() {
f.bucketTypeMutex.Lock()
f._bucketType = ""
f.bucketTypeMutex.Unlock()
}
// getBucketID finds the ID for the current bucket name
func (f *Fs) getBucketID() (bucketID string, err error) {
f.bucketIDMutex.Lock()
defer f.bucketIDMutex.Unlock()
if f._bucketID != "" {
return f._bucketID, nil
}
err = f.listBucketsToFn(func(bucket *api.Bucket) error {
if bucket.Name == f.bucket {
bucketID = bucket.ID
}
return nil
})
if bucketID == "" {
err = fs.ErrorDirNotFound
}
f._bucketID = bucketID
return bucketID, err
}
// setBucketID sets the ID for the current bucket name
func (f *Fs) setBucketID(ID string) {
f.bucketIDMutex.Lock()
f._bucketID = ID
f.bucketIDMutex.Unlock()
}
// clearBucketID clears the ID for the current bucket name
func (f *Fs) clearBucketID() {
f.bucketIDMutex.Lock()
f._bucketID = ""
f.bucketIDMutex.Unlock()
}
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
}
return fs, fs.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_create_bucket",
}
var request = api.CreateBucketRequest{
AccountID: f.info.AccountID,
Name: f.bucket,
Type: "allPrivate",
}
var response api.Bucket
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "duplicate_bucket_name" {
// Check this is our bucket - buckets are globally unique and this
// might be someone else's.
_, getBucketErr := f.getBucketID()
if getBucketErr == nil {
// found so it is our bucket
f.bucketOK = true
return nil
}
if getBucketErr != fs.ErrorDirNotFound {
fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
}
}
}
return errors.Wrap(err, "failed to create bucket")
}
f.setBucketID(response.ID)
f.setBucketType(response.Type)
f.bucketOK = true
return nil
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_delete_bucket",
}
bucketID, err := f.getBucketID()
if err != nil {
return err
}
var request = api.DeleteBucketRequest{
ID: bucketID,
AccountID: f.info.AccountID,
}
var response api.Bucket
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to delete bucket")
}
f.bucketOK = false
f.clearBucketID()
f.clearBucketType()
f.clearUploadURL()
return nil
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Millisecond
}
// hide hides a file on the remote
func (f *Fs) hide(Name string) error {
bucketID, err := f.getBucketID()
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_hide_file",
}
var request = api.HideFileRequest{
BucketID: bucketID,
Name: Name,
}
var response api.File
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "already_hidden" {
// sometimes eventual consistency causes this, so
// ignore this error since it is harmless
return nil
}
}
return errors.Wrapf(err, "failed to hide %q", Name)
}
return nil
}
// deleteByID deletes a file version given Name and ID
func (f *Fs) deleteByID(ID, Name string) error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_delete_file_version",
}
var request = api.DeleteFileRequest{
ID: ID,
Name: Name,
}
var response api.File
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrapf(err, "failed to delete %q", Name)
}
return nil
}
// purge deletes all the files and directories
//
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
var errReturn error
var checkErrMutex sync.Mutex
var checkErr = func(err error) {
if err == nil {
return
}
checkErrMutex.Lock()
defer checkErrMutex.Unlock()
if errReturn == nil {
errReturn = err
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
if time.Since(time.Time(timestamp)).Hours() > 24 {
return true
}
return false
}
// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
var wg sync.WaitGroup
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
go func() {
defer wg.Done()
for object := range toBeDeleted {
accounting.Stats.Checking(object.Name)
checkErr(f.deleteByID(object.ID, object.Name))
accounting.Stats.DoneChecking(object.Name)
}
}()
}
last := ""
checkErr(f.list(ctx, "", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
accounting.Stats.Checking(remote)
if oldOnly && last != remote {
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
}
} else {
fs.Debugf(remote, "Deleting (id %q)", object.ID)
toBeDeleted <- object
}
last = remote
accounting.Stats.DoneChecking(remote)
}
return nil
}))
close(toBeDeleted)
wg.Wait()
if !oldOnly {
checkErr(f.Rmdir(ctx, ""))
}
return errReturn
}
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error {
return f.purge(ctx, false)
}
// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, true)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
destBucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: srcObj.id,
Name: f.root + remote,
MetadataDirective: "COPY",
DestBucketID: destBucketID,
}
var response api.FileInfo
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
o := &Object{
fs: f,
remote: remote,
}
err = o.decodeMetaDataFileInfo(&response)
if err != nil {
return nil, err
}
return o, nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.SHA1)
}
// getDownloadAuthorization returns authorization token for downloading
// without account.
func (f *Fs) getDownloadAuthorization(remote string) (authorization string, err error) {
validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
}
if !f.hasPermission("shareFiles") {
return "", errors.New("sharing a file link requires the shareFiles permission")
}
bucketID, err := f.getBucketID()
if err != nil {
return "", err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_download_authorization",
}
var request = api.GetDownloadAuthorizationRequest{
BucketID: bucketID,
FileNamePrefix: path.Join(f.root, remote),
ValidDurationInSeconds: validDurationInSeconds,
}
var response api.GetDownloadAuthorizationResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return "", errors.Wrap(err, "failed to get download authorization")
}
return response.AuthorizationToken, nil
}
// PublicLink returns a link for downloading without account.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
var RootURL string
if f.opt.DownloadURL == "" {
RootURL = f.info.DownloadURL
} else {
RootURL = f.opt.DownloadURL
}
absPath := "/" + path.Join(f.root, remote)
link = RootURL + "/file/" + urlEncode(f.bucket) + absPath
bucketType, err := f.getbucketType()
if err != nil {
return "", err
}
if bucketType == "allPrivate" || bucketType == "snapshot" {
AuthorizationToken, err := f.getDownloadAuthorization(remote)
if err != nil {
return "", err
}
link += "?Authorization=" + AuthorizationToken
}
return link, nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the Sha-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.SHA1 {
return "", hash.ErrUnsupported
}
if o.sha1 == "" {
// Error is logged in readMetaData
err := o.readMetaData(ctx)
if err != nil {
return "", err
}
}
return o.sha1, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
o.id = ID
o.sha1 = SHA1
o.mimeType = mimeType
// Read SHA1 from metadata if it exists and isn't set
if o.sha1 == "" || o.sha1 == "none" {
o.sha1 = Info[sha1Key]
}
o.size = Size
// Use the UploadTimestamp if we can't get file info
o.modTime = time.Time(UploadTimestamp)
return o.parseTimeString(Info[timeKey])
}
// decodeMetaData sets the metadata in the object from an api.File
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaData(info *api.File) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
// decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}
// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
maxSearched := 1
var timestamp api.Timestamp
baseRemote := o.remote
if o.fs.opt.Versions {
timestamp, baseRemote = api.RemoveVersion(baseRemote)
maxSearched = maxVersions
}
err = o.fs.list(ctx, "", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
if isDirectory {
return nil
}
if remote == baseRemote {
if !timestamp.IsZero() && !timestamp.Equal(object.UploadTimestamp) {
return nil
}
info = object
}
return errEndList // read only 1 item
})
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
if info == nil {
return nil, fs.ErrorObjectNotFound
}
return info, nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.id != "" {
return nil
}
info, err := o.getMetaData(ctx)
if err != nil {
return err
}
return o.decodeMetaData(info)
}
// timeString returns modTime as the number of milliseconds
// elapsed since January 1, 1970 UTC as a decimal string.
func timeString(modTime time.Time) string {
return strconv.FormatInt(modTime.UnixNano()/1E6, 10)
}
// parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
func (o *Object) parseTimeString(timeString string) (err error) {
if timeString == "" {
return nil
}
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return nil
}
o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
return nil
}
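// Illustrative round-trip sketch, not part of the original source: a modTime
// encoded by timeString and decoded the same way as parseTimeString comes back
// truncated to the millisecond, which is consistent with Precision() above.
func exampleTimeRoundTrip(modTime time.Time) time.Time {
	s := strconv.FormatInt(modTime.UnixNano()/1e6, 10) // e.g. "1559390400500"
	ms, _ := strconv.ParseInt(s, 10, 64)               // s is always a valid decimal integer here
	return time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()       // sub-millisecond detail is lost
}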
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
//
// SHA-1 will also be updated once the request has completed.
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
// The error is logged in readMetaData
_ = o.readMetaData(ctx)
return o.modTime
}
// SetModTime sets the modification time of the Object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
info, err := o.getMetaData(ctx)
if err != nil {
return err
}
info.Info[timeKey] = timeString(modTime)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: o.id,
Name: o.fs.root + o.remote, // copy to same name
MetadataDirective: "REPLACE",
ContentType: info.ContentType,
Info: info.Info,
}
var response api.FileInfo
err = o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(&opts, &request, &response)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return err
}
return o.decodeMetaDataFileInfo(&response)
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// openFile represents an Object open for reading
type openFile struct {
o *Object // Object we are reading for
resp *http.Response // response of the GET
body io.Reader // reading from here
hash gohash.Hash // currently accumulating SHA1
bytes int64 // number of bytes read on this connection
eof bool // whether we have read end of file
}
// newOpenFile wraps an io.ReadCloser and checks the sha1sum
func newOpenFile(o *Object, resp *http.Response) *openFile {
file := &openFile{
o: o,
resp: resp,
hash: sha1.New(),
}
file.body = io.TeeReader(resp.Body, file.hash)
return file
}
// Read bytes from the object - see io.Reader
func (file *openFile) Read(p []byte) (n int, err error) {
n, err = file.body.Read(p)
file.bytes += int64(n)
if err == io.EOF {
file.eof = true
}
return
}
// Close the object and checks the length and SHA1 if all the object
// was read
func (file *openFile) Close() (err error) {
// Close the body at the end
defer fs.CheckClose(file.resp.Body, &err)
// If not end of file then can't check SHA1
if !file.eof {
return nil
}
// Check to see we read the correct number of bytes
if file.o.Size() != file.bytes {
return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
}
// Check the SHA1
receivedSHA1 := file.o.sha1
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
}
return nil
}
// Check it satisfies the interfaces
var _ io.ReadCloser = &openFile{}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
opts := rest.Opts{
Method: "GET",
Options: options,
}
// Use the downloadUrl supplied by Backblaze unless a custom
// download_url has been configured, in which case use that
if o.fs.opt.DownloadURL == "" {
opts.RootURL = o.fs.info.DownloadURL
} else {
opts.RootURL = o.fs.opt.DownloadURL
}
// Download by id if set otherwise by name
if o.id != "" {
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
} else {
opts.Path += "/file/" + urlEncode(o.fs.bucket) + "/" + urlEncode(o.fs.root+o.remote)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
}
// Parse the time out of the headers if possible
err = o.parseTimeString(resp.Header.Get(timeHeader))
if err != nil {
_ = resp.Body.Close()
return nil, err
}
// Read sha1 from header if it isn't set
if o.sha1 == "" {
o.sha1 = resp.Header.Get(sha1Header)
fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
// if sha1 header is "none" (in big files), then need
// to read it from the metadata
if o.sha1 == "none" {
o.sha1 = resp.Header.Get(sha1InfoHeader)
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
}
}
// Don't check length or hash on partial content
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil
}
return newOpenFile(o, resp), nil
}
// dontEncode is the characters that do not need percent-encoding
//
// The characters that do not need percent-encoding are a subset of
// the printable ASCII characters: upper-case letters, lower-case
// letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
// "*", ";", "=", ":", and "@". All other byte values in a UTF-8 must
// be replaced with "%" and the two-digit hex value of the byte.
const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
`0123456789` +
`._-/~!$'()*;=:@`)
// noNeedToEncode is a bitmap of characters which don't need % encoding
var noNeedToEncode [256]bool
func init() {
for _, c := range dontEncode {
noNeedToEncode[c] = true
}
}
// urlEncode encodes in with % encoding
func urlEncode(in string) string {
var out bytes.Buffer
for i := 0; i < len(in); i++ {
c := in[i]
if noNeedToEncode[c] {
_ = out.WriteByte(c)
} else {
_, _ = out.WriteString(fmt.Sprintf("%%%2X", c))
}
}
return out.String()
}
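// Illustrative sketch, not part of the original file, of the encoding rules
// described above: bytes listed in dontEncode pass through unchanged and every
// other byte of the UTF-8 encoding becomes a %XX escape.
func exampleURLEncode() string {
	// "a b/ü" encodes to "a%20b/%C3%BC": the space and both UTF-8 bytes of 'ü' are escaped
	return urlEncode("a b/ü")
}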
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.opt.Versions {
return errNotWithVersions
}
err = o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
size := src.Size()
if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
buf := o.fs.getUploadBlock()
n, err := io.ReadFull(in, buf)
if err == nil {
bufReader := bufio.NewReader(in)
in = bufReader
_, err = bufReader.Peek(1)
}
if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src)
if err != nil {
o.fs.putUploadBlock(buf)
return err
}
return up.Stream(buf)
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putUploadBlock(buf)
size = int64(n)
in = bytes.NewReader(buf[:n])
} else {
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
up, err := o.fs.newLargeUpload(ctx, o, in, src)
if err != nil {
return err
}
return up.Upload()
}
modTime := src.ModTime(ctx)
calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
if calculatedSha1 == "" {
calculatedSha1 = "hex_digits_at_end"
har := newHashAppendingReader(in, sha1.New())
size += int64(har.AdditionalLength())
in = har
}
// Get upload URL
upload, err := o.fs.getUploadURL()
if err != nil {
return err
}
defer func() {
// return it like this because we might nil it out
o.fs.returnUploadURL(upload)
}()
// Headers for upload file
//
// Authorization
// required
// An upload authorization token, from b2_get_upload_url.
//
// X-Bz-File-Name
// required
//
// The name of the file, in percent-encoded UTF-8. See Files for requirements on file names. See String Encoding.
//
// Content-Type
// required
//
// The MIME type of the content of the file, which will be returned in
// the Content-Type header when downloading the file. Use the
// Content-Type b2/x-auto to automatically set the stored Content-Type
// post upload. In the case where a file extension is absent or the
// lookup fails, the Content-Type is set to application/octet-stream. The
// Content-Type mappings can be pursued here.
//
// X-Bz-Content-Sha1
// required
//
// The SHA1 checksum of the content of the file. B2 will check this when
// the file is uploaded, to make sure that the file arrived correctly. It
// will be returned in the X-Bz-Content-Sha1 header when the file is
// downloaded.
//
// X-Bz-Info-src_last_modified_millis
// optional
//
// If the original source of the file being uploaded has a last modified
// time concept, Backblaze recommends using this spelling of one of your
// ten X-Bz-Info-* headers (see below). Using a standard spelling allows
// different B2 clients and the B2 web user interface to interoperate
// correctly. The value should be a base 10 number which represents a UTC
// time when the original source file was last modified. It is a base 10
// number of milliseconds since midnight, January 1, 1970 UTC. This fits
// in a 64 bit integer such as the type "long" in the programming
// language Java. It is intended to be compatible with Java's time
// long. For example, it can be passed directly into the Java call
// Date.setTime(long time).
//
// X-Bz-Info-*
// optional
//
// Up to 10 of these headers may be present. The * part of the header
// name is replaced with the name of a custom field in the file
// information stored with the file, and the value is an arbitrary UTF-8
// string, percent-encoded. The same info headers sent with the upload
// will be returned with the download.
opts := rest.Opts{
Method: "POST",
RootURL: upload.UploadURL,
Body: in,
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
"Content-Type": fs.MimeType(ctx, src),
sha1Header: calculatedSha1,
timeHeader: timeString(modTime),
},
ContentLength: &size,
}
var response api.FileInfo
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(&opts, nil, &response)
retry, err := o.fs.shouldRetry(resp, err)
// On retryable error clear UploadURL
if retry {
fs.Debugf(o, "Clearing upload URL because of error: %v", err)
upload = nil
}
return retry, err
})
if err != nil {
return err
}
return o.decodeMetaDataFileInfo(&response)
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
if o.fs.opt.Versions {
return errNotWithVersions
}
if o.fs.opt.HardDelete {
return o.fs.deleteByID(o.id, o.fs.root+o.remote)
}
return o.fs.hide(o.fs.root + o.remote)
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.id
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)
| 1 | 8,992 | I think this change and the one below will break the integration tests which expect a `nil` object if `NewObject` fails. | rclone-rclone | go |
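A minimal sketch of the calling convention this review refers to (hypothetical test-style Go, not taken from the rclone repository, and assuming its fs.Fs interface): callers and integration tests treat a non-nil error from NewObject as implying a nil object, so returning a partially initialised object alongside the error would break that expectation.
func assertNilOnError(ctx context.Context, f fs.Fs, remote string) error {
	obj, err := f.NewObject(ctx, remote)
	if err != nil && obj != nil {
		// hypothetical check: with the proposed change this branch becomes reachable
		return fmt.Errorf("expected nil object when NewObject fails, got %v", obj)
	}
	return err
}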
@@ -1,6 +1,7 @@
# frozen_string_literal: true
require 'hocon'
+require 'bolt/error'
class TransportConfig
attr_accessor :host, :port, :ssl_cert, :ssl_key, :ssl_ca_cert, :ssl_cipher_suites, | 1 | # frozen_string_literal: true
require 'hocon'
class TransportConfig
attr_accessor :host, :port, :ssl_cert, :ssl_key, :ssl_ca_cert, :ssl_cipher_suites,
:loglevel, :logfile, :whitelist, :concurrency
def initialize(global = nil, local = nil)
@host = '127.0.0.1'
@port = 62658
@ssl_cert = nil
@ssl_key = nil
@ssl_ca_cert = nil
@ssl_cipher_suites = ['ECDHE-ECDSA-AES256-GCM-SHA384',
'ECDHE-RSA-AES256-GCM-SHA384',
'ECDHE-ECDSA-CHACHA20-POLY1305',
'ECDHE-RSA-CHACHA20-POLY1305',
'ECDHE-ECDSA-AES128-GCM-SHA256',
'ECDHE-RSA-AES128-GCM-SHA256',
'ECDHE-ECDSA-AES256-SHA384',
'ECDHE-RSA-AES256-SHA384',
'ECDHE-ECDSA-AES128-SHA256',
'ECDHE-RSA-AES128-SHA256']
@loglevel = 'notice'
@logfile = nil
@whitelist = nil
@concurrency = 100
global_path = global || '/etc/puppetlabs/bolt-server/conf.d/bolt-server.conf'
local_path = local || File.join(ENV['HOME'].to_s, ".puppetlabs", "bolt-server.conf")
load_config(global_path)
load_config(local_path)
validate
end
def load_config(path)
begin
parsed_hocon = Hocon.load(path)['bolt-server']
rescue Hocon::ConfigError => e
raise "Hocon data in '#{path}' failed to load.\n Error: '#{e.message}'"
rescue Errno::EACCES
raise "Your user doesn't have permission to read #{path}"
end
unless parsed_hocon.nil?
%w[host port ssl-cert ssl-key ssl-ca-cert ssl-cipher-suites loglevel logfile whitelist concurrency].each do |key|
varname = '@' + key.tr('-', '_')
instance_variable_set(varname, parsed_hocon[key]) if parsed_hocon.key?(key)
end
end
end
def validate
required_keys = %w[ssl_cert ssl_key ssl_ca_cert]
ssl_keys = %w[ssl_cert ssl_key ssl_ca_cert]
required_keys.each do |k|
next unless send(k).nil?
raise Bolt::ValidationError, <<-MSG
You must configure #{k} in either /etc/puppetlabs/bolt-server/conf.d/bolt-server.conf or ~/.puppetlabs/bolt-server.conf
MSG
end
unless @port.is_a?(Integer) && @port > 0
raise Bolt::ValidationError, "Configured 'port' must be a valid integer greater than 0"
end
ssl_keys.each do |sk|
unless File.file?(send(sk)) && File.readable?(send(sk))
raise Bolt::ValidationError, "Configured #{sk} must be a valid filepath"
end
end
unless @ssl_cipher_suites.is_a?(Array)
raise Bolt::ValidationError, "Configured 'ssl-cipher-suites' must be an array of cipher suite names"
end
unless @whitelist.nil? || @whitelist.is_a?(Array)
raise Bolt::ValidationError, "Configured 'whitelist' must be an array of names"
end
unless @concurrency.is_a?(Integer) && @concurrency.positive?
raise Bolt::ValidationError, "Configured 'concurrency' must be a positive integer"
end
end
end
| 1 | 9,745 | Looks like this isn't used in this file? | puppetlabs-bolt | rb |
@@ -327,7 +327,10 @@ class PandasLikeSeries(_Frame):
sum = df_dropna._spark_count()
df = df._spark_withColumn('count', F._spark_col('count') / F._spark_lit(sum))
- return _col(df.set_index([self.name]))
+ hidden_name = 'index' if self.name != 'index' else 'level_0'
+ df.columns = [hidden_name, self.name]
+ df._metadata = Metadata(column_fields=[self.name], index_info=[(hidden_name, None)])
+ return _col(df)
@property
def _pandas_anchor(self) -> DataFrame: | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base classes to be monkey-patched to DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
from functools import reduce
import pandas as pd
import numpy as np
import pyspark.sql.functions as F
from pyspark.sql import DataFrame, Column
from pyspark.sql.types import FloatType, DoubleType, StructType, to_arrow_type
from pyspark.sql.utils import AnalysisException
from . import namespace
from .metadata import Metadata
from .selection import SparkDataFrameLocator
from ._dask_stubs.utils import derived_from
from ._dask_stubs.compatibility import string_types
__all__ = ['PandasLikeSeries', 'PandasLikeDataFrame', 'SparkSessionPatches', 'anchor_wrap']
max_display_count = 1000
class SparkSessionPatches(object):
"""
Methods for :class:`SparkSession`.
"""
def from_pandas(self, pdf):
metadata = Metadata.from_pandas(pdf)
reset_index = pdf.reset_index()
reset_index.columns = metadata.all_fields
df = self.createDataFrame(reset_index)
df._metadata = metadata
return df
from_pandas.__doc__ = namespace.from_pandas.__doc__
def read_csv(self, path, header='infer', names=None, usecols=None,
mangle_dupe_cols=True, parse_dates=False, comment=None):
if mangle_dupe_cols is not True:
raise ValueError("mangle_dupe_cols can only be `True`: %s" % mangle_dupe_cols)
if parse_dates is not False:
raise ValueError("parse_dates can only be `False`: %s" % parse_dates)
if usecols is not None and not callable(usecols):
usecols = list(usecols)
if usecols is None or callable(usecols) or len(usecols) > 0:
reader = self.read.option("inferSchema", "true")
if header == 'infer':
header = 0 if names is None else None
if header == 0:
reader.option("header", True)
elif header is None:
reader.option("header", False)
else:
raise ValueError("Unknown header argument {}".format(header))
if comment is not None:
if not isinstance(comment, string_types) or len(comment) != 1:
raise ValueError("Only length-1 comment characters supported")
reader.option("comment", comment)
df = reader.csv(path)
if header is None:
df = df._spark_selectExpr(*["`%s` as `%s`" % (field.name, i)
for i, field in enumerate(df.schema)])
if names is not None:
names = list(names)
if len(set(names)) != len(names):
raise ValueError('Found non-unique column index')
if len(names) != len(df.schema):
raise ValueError('Names do not match the number of columns: %d' % len(names))
df = df._spark_selectExpr(*["`%s` as `%s`" % (field.name, name)
for field, name in zip(df.schema, names)])
if usecols is not None:
if callable(usecols):
cols = [field.name for field in df.schema if usecols(field.name)]
missing = []
elif all(isinstance(col, int) for col in usecols):
cols = [field.name for i, field in enumerate(df.schema) if i in usecols]
missing = [col for col in usecols
if col >= len(df.schema) or df.schema[col].name not in cols]
elif all(isinstance(col, string_types) for col in usecols):
cols = [field.name for field in df.schema if field.name in usecols]
missing = [col for col in usecols if col not in cols]
else:
raise ValueError("'usecols' must either be list-like of all strings, "
"all unicode, all integers or a callable.")
if len(missing) > 0:
raise ValueError('Usecols do not match columns, columns expected but not '
'found: %s' % missing)
if len(cols) > 0:
df = df._spark_select(cols)
else:
df = self.createDataFrame([], schema=StructType())
else:
df = self.createDataFrame([], schema=StructType())
return df
read_csv.__doc__ = namespace.read_csv.__doc__
def read_parquet(self, path, columns=None):
if columns is not None:
columns = list(columns)
if columns is None or len(columns) > 0:
df = self.read.parquet(path)
if columns is not None:
fields = [field.name for field in df.schema]
cols = [col for col in columns if col in fields]
if len(cols) > 0:
df = df._spark_select(cols)
else:
df = self.createDataFrame([], schema=StructType())
else:
df = self.createDataFrame([], schema=StructType())
return df
read_parquet.__doc__ = namespace.read_parquet.__doc__
class _Frame(object):
"""
The base class for both dataframes and series.
"""
def max(self):
return _reduce_spark(self, F.max)
@derived_from(pd.DataFrame)
def abs(self):
"""
Return a Series/DataFrame with absolute numeric value of each element.
:return: :class:`Series` or :class:`DataFrame` with the absolute value of each element.
"""
return _spark_col_apply(self, F.abs)
def compute(self):
"""Alias of `toPandas()` to mimic dask for easily porting tests."""
return self.toPandas()
class PandasLikeSeries(_Frame):
"""
Methods that are appropriate for distributed series.
"""
def __init__(self):
""" Define additional private fields.
* ``_pandas_metadata``: The metadata which stores column fields, and index fields and names.
* ``_spark_ref_dataframe``: The reference to the DataFrame anchored to this Column.
* ``_pandas_schema``: The schema when representing this Column as a DataFrame.
"""
self._pandas_metadata = None
self._spark_ref_dataframe = None
self._pandas_schema = None
@property
def dtype(self):
from .typing import as_python_type
return as_python_type(self.schema.fields[-1].dataType)
def astype(self, tpe):
from .typing import as_spark_type
spark_type = as_spark_type(tpe)
if not spark_type:
raise ValueError("Type {} not understood".format(tpe))
return anchor_wrap(self, self._spark_cast(spark_type))
def getField(self, name):
if not isinstance(self.schema, StructType):
raise AttributeError("Not a struct: {}".format(self.schema))
else:
fnames = self.schema.fieldNames()
if name not in fnames:
raise AttributeError(
"Field {} not found, possible values are {}".format(name, ", ".join(fnames)))
return anchor_wrap(self, self._spark_getField(name))
@property
def schema(self):
if not hasattr(self, '_pandas_schema') or self._pandas_schema is None:
self._pandas_schema = self.to_dataframe().schema
return self._pandas_schema
@property
def shape(self):
return len(self),
@property
def name(self):
return self._jc.toString()
@name.setter
def name(self, name):
self.rename(name, inplace=True)
def rename(self, name, inplace=False):
df = self.to_dataframe()._spark_select(self._metadata.index_fields +
[self._spark_alias(name)])
df._metadata = self._metadata.copy(column_fields=[name])
col = _col(df)
if inplace:
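# In-place rename: re-anchor this Series to the renamed DataFrame, adopt the renamed
# column's underlying Java column, and clear the cached schema/metadata so they are
# rebuilt lazily on next access.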
anchor_wrap(col, self)
self._jc = col._jc
self._pandas_schema = None
self._pandas_metadata = None
return self
else:
return col
@property
def _metadata(self):
if not hasattr(self, '_pandas_metadata') or self._pandas_metadata is None:
ref = self._pandas_anchor
self._pandas_metadata = ref._metadata.copy(column_fields=[self.name])
return self._pandas_metadata
@derived_from(pd.Series)
def reset_index(self, level=None, drop=False, name=None, inplace=False):
if inplace and not drop:
raise TypeError('Cannot reset_index inplace on a Series to create a DataFrame')
if name is not None:
df = self.rename(name).to_dataframe()
else:
df = self.to_dataframe()
df = df.reset_index(level=level, drop=drop)
if drop:
col = _col(df)
if inplace:
anchor_wrap(col, self)
self._jc = col._jc
self._pandas_schema = None
self._pandas_metadata = None
else:
return col
else:
return df
@property
def loc(self):
return SparkDataFrameLocator(self)
def to_dataframe(self):
ref = self._pandas_anchor
df = ref._spark_select(self._metadata.index_fields + [self])
df._metadata = self._metadata.copy()
return df
def toPandas(self):
return _col(self.to_dataframe().toPandas())
@derived_from(pd.Series)
def isnull(self):
if isinstance(self.schema[self.name].dataType, (FloatType, DoubleType)):
return anchor_wrap(self, self._spark_isNull() | F._spark_isnan(self))
else:
return anchor_wrap(self, self._spark_isNull())
isna = isnull
@derived_from(pd.Series)
def notnull(self):
return ~self.isnull()
notna = notnull
@derived_from(pd.Series)
def dropna(self, axis=0, inplace=False, **kwargs):
col = _col(self.to_dataframe().dropna(axis=axis, inplace=False))
if inplace:
anchor_wrap(col, self)
self._jc = col._jc
self._pandas_schema = None
self._pandas_metadata = None
else:
return col
def head(self, n=5):
return _col(self.to_dataframe().head(n))
def unique(self):
# Pandas wants a series/array-like object
return _col(self.to_dataframe().unique())
@derived_from(pd.Series)
def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
if bins is not None:
raise NotImplementedError("value_counts currently does not support bins")
if dropna:
df_dropna = self.to_dataframe()._spark_filter(self.notna())
else:
df_dropna = self.to_dataframe()
df = df_dropna._spark_groupby(self).count()
if sort:
if ascending:
df = df._spark_orderBy(F._spark_col('count'))
else:
df = df._spark_orderBy(F._spark_col('count')._spark_desc())
if normalize:
sum = df_dropna._spark_count()
df = df._spark_withColumn('count', F._spark_col('count') / F._spark_lit(sum))
return _col(df.set_index([self.name]))
@property
def _pandas_anchor(self) -> DataFrame:
"""
The anchoring dataframe for this column (if any).
:return: the :class:`DataFrame` this column is anchored to.
"""
if hasattr(self, "_spark_ref_dataframe"):
return self._spark_ref_dataframe
n = self._pandas_orig_repr()
raise ValueError("No reference to a dataframe for column {}".format(n))
def __len__(self):
return len(self.to_dataframe())
def __getitem__(self, key):
return anchor_wrap(self, self._spark_getitem(key))
def __getattr__(self, item):
if item.startswith("__") or item.startswith("_pandas_") or item.startswith("_spark_"):
raise AttributeError(item)
return anchor_wrap(self, self.getField(item))
def __invert__(self):
return anchor_wrap(self, self.astype(bool) == F._spark_lit(False))
def __str__(self):
return self._pandas_orig_repr()
def __repr__(self):
return repr(self.head(max_display_count).toPandas())
def __dir__(self):
if not isinstance(self.schema, StructType):
fields = []
else:
fields = [f for f in self.schema.fieldNames() if ' ' not in f]
return super(Column, self).__dir__() + fields
def _pandas_orig_repr(self):
# TODO: figure out how to reuse the original one.
return 'Column<%s>' % self._jc.toString().encode('utf8')
class PandasLikeDataFrame(_Frame):
"""
Methods that are relevant to dataframes.
"""
def __init__(self):
""" Define additional private fields.
* ``_pandas_metadata``: The metadata which stores the column fields, index fields, and index names.
"""
self._pandas_metadata = None
@property
def _metadata(self):
if not hasattr(self, '_pandas_metadata') or self._pandas_metadata is None:
self._pandas_metadata = Metadata(column_fields=self.schema.fieldNames())
return self._pandas_metadata
@_metadata.setter
def _metadata(self, metadata):
self._pandas_metadata = metadata
@property
def _index_columns(self):
return [anchor_wrap(self, self._spark_getitem(field))
for field in self._metadata.index_fields]
@derived_from(pd.DataFrame)
def iteritems(self):
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
@derived_from(pd.DataFrame)
def to_html(self, index=True, classes=None):
return self.toPandas().to_html(index=index, classes=classes)
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns. By default
yields a new object.
:param keys: column label or list of column labels / arrays
:param drop: boolean, default True
Delete columns to be used as the new index
:param append: boolean, default False
Whether to append columns to existing index
:param inplace: boolean, default False
Modify the DataFrame in place (do not create a new object)
:return: :class:`DataFrame`
"""
if isinstance(keys, string_types):
keys = [keys]
else:
keys = list(keys)
for key in keys:
if key not in self.columns:
raise KeyError(key)
if drop:
columns = [column for column in self._metadata.column_fields if column not in keys]
else:
columns = self._metadata.column_fields
if append:
index_info = self._metadata.index_info + [(column, column) for column in keys]
else:
index_info = [(column, column) for column in keys]
metadata = self._metadata.copy(column_fields=columns, index_info=index_info)
if inplace:
self._metadata = metadata
else:
df = self.copy()
df._metadata = metadata
return df
def reset_index(self, level=None, drop=False, inplace=False):
"""For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
:param level: int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by default
:param drop: boolean, default False
Do not try to insert index into dataframe columns. This resets the index to the
default integer index.
:param inplace: boolean, default False
Modify the DataFrame in place (do not create a new object)
:return: :class:`DataFrame`
"""
if len(self._metadata.index_info) == 0:
raise NotImplementedError('Can\'t reset index because there is no index.')
multi_index = len(self._metadata.index_info) > 1
if multi_index:
rename = lambda i: 'level_{}'.format(i)
else:
rename = lambda i: \
'index' if 'index' not in self._metadata.column_fields else 'level_{}'.format(i)
if level is None:
index_columns = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._metadata.index_info)]
index_info = []
else:
if isinstance(level, (int, string_types)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for l in level:
if l >= len(self._metadata.index_info):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._metadata.index_info), l + 1))
idx = level
elif all(isinstance(l, string_types) for l in level):
idx = []
for l in level:
try:
i = self._metadata.index_fields.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError('Level {} not found'.format(l))
else:
raise KeyError('Level {} must be same as name ({})'
.format(l, self._metadata.index_fields[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
index_columns = []
index_info = self._metadata.index_info.copy()
for i in idx:
info = self._metadata.index_info[i]
column_field, index_name = info
index_columns.append((column_field,
index_name if index_name is not None else rename(i)))
index_info.remove(info)
if drop:
index_columns = []
metadata = self._metadata.copy(
column_fields=[column for column, _ in index_columns] + self._metadata.column_fields,
index_info=index_info)
columns = [name for _, name in index_columns] + self._metadata.column_fields
if inplace:
self._metadata = metadata
self.columns = columns
else:
df = self.copy()
df._metadata = metadata
df.columns = columns
return df
@derived_from(pd.DataFrame)
def isnull(self):
df = self.copy()
for name, col in df.iteritems():
df[name] = col.isnull()
return df
isna = isnull
@derived_from(pd.DataFrame)
def notnull(self):
df = self.copy()
for name, col in df.iteritems():
df[name] = col.notnull()
return df
notna = notnull
@derived_from(DataFrame)
def toPandas(self):
df = self._spark_select(self._metadata.all_fields)
pdf = df._spark_toPandas()
if len(pdf) == 0 and len(df.schema) > 0:
# TODO: push to OSS
pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype()
for field in df.schema})
if len(self._metadata.index_info) > 0:
append = False
for index_field in self._metadata.index_fields:
drop = index_field not in self._metadata.column_fields
pdf = pdf.set_index(index_field, drop=drop, append=append)
append = True
pdf = pdf[self._metadata.column_fields]
index_names = self._metadata.index_names
if len(index_names) > 0:
if isinstance(pdf.index, pd.MultiIndex):
pdf.index.names = index_names
else:
pdf.index.name = index_names[0]
return pdf
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
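# Illustrative usage (a sketch, assuming a DataFrame `df` with a numeric column 'x'):
#   df.assign(y=df['x'] + 1, z=lambda d: d['x'] * 2)
# Accepted value types, per the check below: Columns anchored to this DataFrame,
# callables, or scalars.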
for k, v in kwargs.items():
if not (isinstance(v, (Column,)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = list(kwargs.items())
df = self
for (name, c) in pairs:
df = df._spark_withColumn(name, c)
df._metadata = self._metadata.copy(
column_fields=(self._metadata.column_fields +
[name for name, _ in pairs if name not in self._metadata.column_fields]))
return df
@property
def loc(self):
return SparkDataFrameLocator(self)
def copy(self):
df = DataFrame(self._jdf, self.sql_ctx)
df._metadata = self._metadata.copy()
return df
@derived_from(pd.DataFrame)
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, string_types):
columns = [subset]
else:
columns = list(subset)
invalids = [column for column in columns
if column not in self._metadata.column_fields]
if len(invalids) > 0:
raise KeyError(invalids)
else:
columns = list(self.columns)
cnt = reduce(lambda x, y: x + y,
[F._spark_when(self[column].notna(), 1)._spark_otherwise(0)
for column in columns],
F._spark_lit(0))
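# cnt is the per-row count of non-null values among the examined columns; the
# predicates below keep a row when cnt >= thresh, when every column is non-null
# (how='any'), or when at least one column is non-null (how='all').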
if thresh is not None:
pred = cnt >= F._spark_lit(int(thresh))
elif how == 'any':
pred = cnt == F._spark_lit(len(columns))
elif how == 'all':
pred = cnt > F._spark_lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
df = self._spark_filter(pred)
df._metadata = self._metadata.copy()
if inplace:
_reassign_jdf(self, df)
else:
return df
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
def head(self, n=5):
df = self._spark_limit(n)
df._metadata = self._metadata.copy()
return df
@property
def columns(self):
return pd.Index(self._metadata.column_fields)
@columns.setter
def columns(self, names):
old_names = self._metadata.column_fields
if len(old_names) != len(names):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(names)))
df = self
for (old_name, new_name) in zip(old_names, names):
df = df._spark_withColumnRenamed(old_name, new_name)
df._metadata = self._metadata.copy(column_fields=names)
_reassign_jdf(self, df)
def count(self):
return self._spark_count()
def unique(self):
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@derived_from(pd.DataFrame)
def drop(self, labels, axis=0, errors='raise'):
axis = self._validate_axis(axis)
if axis == 1:
if isinstance(labels, list):
df = self._spark_drop(*labels)
df._metadata = self._metadata.copy(
column_fields=[column for column in self._metadata.column_fields
if column not in labels])
else:
df = self._spark_drop(labels)
df._metadata = self._metadata.copy(
column_fields=[column for column in self._metadata.column_fields
if column != labels])
return df
# return self.map_partitions(M.drop, labels, axis=axis, errors=errors)
raise NotImplementedError("Drop currently only works for axis=1")
@derived_from(pd.DataFrame)
def get(self, key, default=None):
try:
return anchor_wrap(self, self._pd_getitem(key))
except (KeyError, ValueError, IndexError):
return default
def sort_values(self, by):
df = self._spark_sort(by)
df._metadata = self._metadata
return df
def groupby(self, by):
gp = self._spark_groupby(by)
from .groups import PandasLikeGroupBy
return PandasLikeGroupBy(self, gp, None)
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
@property
def shape(self):
return len(self), len(self.columns)
def _pd_getitem(self, key):
if key is None:
raise KeyError("none key")
if isinstance(key, string_types):
try:
return self._spark_getitem(key)
except AnalysisException:
raise KeyError(key)
if np.isscalar(key) or isinstance(key, (tuple, string_types)):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return self._spark_getitem(key)
if isinstance(key, Column):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key.cast("boolean")
df = self._spark_getitem(bcol)
df._metadata = self._metadata
return anchor_wrap(self, df)
raise NotImplementedError(key)
def __getitem__(self, key):
return anchor_wrap(self, self._pd_getitem(key))
def __setitem__(self, key, value):
# For now, we don't support realignment against different dataframes.
# This is too expensive in Spark.
# Are we assigning against a column?
if isinstance(value, Column):
assert value._pandas_anchor is self,\
"Cannot combine column argument because it comes from a different dataframe"
if isinstance(key, (tuple, list)):
assert isinstance(value.schema, StructType)
field_names = value.schema.fieldNames()
df = self.assign(**{k: value[c]
for k, c in zip(key, field_names)})
else:
df = self.assign(**{key: value})
_reassign_jdf(self, df)
def __getattr__(self, key):
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
return anchor_wrap(self, self._spark_getattr(key))
def __iter__(self):
return self.toPandas().__iter__()
def __len__(self):
return self._spark_count()
def __dir__(self):
fields = [f for f in self.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
def _repr_html_(self):
return self.head(max_display_count).toPandas()._repr_html_()
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
def _reassign_jdf(target_df: DataFrame, new_df: DataFrame):
"""
Reassigns the Java DataFrame content of a dataframe.
"""
target_df._jdf = new_df._jdf
target_df._metadata = new_df._metadata
# Reset the cached variables
target_df._schema = None
target_df._lazy_rdd = None
def _spark_col_apply(col_or_df, sfun):
"""
Applies a function to all cells of a dataframe (or to a single column), the function being a known SQL function.
"""
if isinstance(col_or_df, Column):
return sfun(col_or_df)
assert isinstance(col_or_df, DataFrame)
df = col_or_df
df = df._spark_select([sfun(df[col]).alias(col) for col in df.columns])
return df
def _reduce_spark(col_or_df, sfun):
"""
Performs a reduction on a dataframe, the function being a known SQL function.
"""
if isinstance(col_or_df, Column):
col = col_or_df
df0 = col._spark_ref_dataframe._spark_select(sfun(col))
else:
assert isinstance(col_or_df, DataFrame)
df = col_or_df
df0 = df._spark_select(sfun("*"))
return _unpack_scalar(df0)
def _unpack_scalar(df):
"""
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
"""
l = df.head(2).collect()
assert len(l) == 1, (df, l)
row = l[0]
l2 = list(row.asDict().values())
assert len(l2) == 1, (row, l2)
return l2[0]
def anchor_wrap(df, col):
"""
Ensures that the column has an anchoring reference to the dataframe.
This is required to get self-representable columns.
:param df: dataframe or column
:param col: a column
:return: column
"""
if isinstance(col, Column):
if isinstance(df, Column):
ref = df._pandas_anchor
else:
assert isinstance(df, DataFrame), type(df)
ref = df
col._spark_ref_dataframe = ref
return col
def _col(df):
assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)
return df[df.columns[0]]
| 1 | 8,234 | why hidden name? How about `index_name`? | databricks-koalas | py |
@@ -367,6 +367,9 @@ ExWorkProcRetcode ExHdfsScanTcb::work()
char cursorId[8];
HdfsFileInfo *hdfo = NULL;
Lng32 openType = 0;
+ int hiveScanMode = 0;
+
+ hiveScanMode = CmpCommon::getDefaultLong(HIVE_SCAN_SPECIAL_MODE);
while (!qparent_.down->isEmpty())
{ | 1 | // **********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
// **********************************************************************
#include "Platform.h"
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include <poll.h>
#include <iostream>
#include "ex_stdh.h"
#include "ComTdb.h"
#include "ex_tcb.h"
#include "ExHdfsScan.h"
#include "ex_exe_stmt_globals.h"
#include "ExpLOBinterface.h"
#include "SequenceFileReader.h"
#include "Hbase_types.h"
#include "stringBuf.h"
#include "NLSConversion.h"
//#include "hdfs.h"
#include "ExpORCinterface.h"
ex_tcb * ExHdfsScanTdb::build(ex_globals * glob)
{
ExExeStmtGlobals * exe_glob = glob->castToExExeStmtGlobals();
ex_assert(exe_glob,"This operator cannot be in DP2");
ExHdfsScanTcb *tcb = NULL;
if ((isTextFile()) || (isSequenceFile()))
{
tcb = new(exe_glob->getSpace())
ExHdfsScanTcb(
*this,
exe_glob);
}
else if (isOrcFile())
{
tcb = new(exe_glob->getSpace())
ExOrcScanTcb(
*this,
exe_glob);
}
ex_assert(tcb, "Error building ExHdfsScanTcb.");
return (tcb);
}
ex_tcb * ExOrcFastAggrTdb::build(ex_globals * glob)
{
ExHdfsScanTcb *tcb = NULL;
tcb = new(glob->getSpace())
ExOrcFastAggrTcb(
*this,
glob);
ex_assert(tcb, "Error building ExHdfsScanTcb.");
return (tcb);
}
////////////////////////////////////////////////////////////////
// Constructor and initialization.
////////////////////////////////////////////////////////////////
ExHdfsScanTcb::ExHdfsScanTcb(
const ComTdbHdfsScan &hdfsScanTdb,
ex_globals * glob ) :
ex_tcb( hdfsScanTdb, 1, glob)
, workAtp_(NULL)
, bytesLeft_(0)
, hdfsScanBuffer_(NULL)
, hdfsBufNextRow_(NULL)
, hdfsLoggingRow_(NULL)
, hdfsLoggingRowEnd_(NULL)
, debugPrevRow_(NULL)
, hdfsSqlBuffer_(NULL)
, hdfsSqlData_(NULL)
, pool_(NULL)
, step_(NOT_STARTED)
, matches_(0)
, matchBrkPoint_(0)
, endOfRequestedRange_(NULL)
, sequenceFileReader_(NULL)
, seqScanAgain_(false)
, hdfo_(NULL)
, numBytesProcessedInRange_(0)
, exception_(FALSE)
, checkRangeDelimiter_(FALSE)
{
Space * space = (glob ? glob->getSpace() : 0);
CollHeap * heap = (glob ? glob->getDefaultHeap() : 0);
const int readBufSize = (Int32)hdfsScanTdb.hdfsBufSize_;
hdfsScanBuffer_ = new(space) char[ readBufSize + 1 ];
hdfsScanBuffer_[readBufSize] = '\0';
moveExprColsBuffer_ = new(space) ExSimpleSQLBuffer( 1, // one row
(Int32)hdfsScanTdb.moveExprColsRowLength_,
space);
short error = moveExprColsBuffer_->getFreeTuple(moveExprColsTupp_);
ex_assert((error == 0), "get_free_tuple cannot hold a row.");
moveExprColsData_ = moveExprColsTupp_.getDataPointer();
hdfsSqlBuffer_ = new(space) ExSimpleSQLBuffer( 1, // one row
(Int32)hdfsScanTdb.hdfsSqlMaxRecLen_,
space);
error = hdfsSqlBuffer_->getFreeTuple(hdfsSqlTupp_);
ex_assert((error == 0), "get_free_tuple cannot hold a row.");
hdfsSqlData_ = hdfsSqlTupp_.getDataPointer();
hdfsAsciiSourceBuffer_ = new(space) ExSimpleSQLBuffer( 1, // one row
(Int32)hdfsScanTdb.asciiRowLen_ * 2, // just in case
space);
error = hdfsAsciiSourceBuffer_->getFreeTuple(hdfsAsciiSourceTupp_);
ex_assert((error == 0), "get_free_tuple cannot hold a row.");
hdfsAsciiSourceData_ = hdfsAsciiSourceTupp_.getDataPointer();
pool_ = new(space)
sql_buffer_pool(hdfsScanTdb.numBuffers_,
hdfsScanTdb.bufferSize_,
space,
((ExHdfsScanTdb &)hdfsScanTdb).denseBuffers() ?
SqlBufferBase::DENSE_ : SqlBufferBase::NORMAL_);
pool_->setStaticMode(TRUE);
defragTd_ = NULL;
// removing the cast produces a compile error
if (((ExHdfsScanTdb &)hdfsScanTdb).useCifDefrag())
{
defragTd_ = pool_->addDefragTuppDescriptor(hdfsScanTdb.outputRowLength_);
}
// Allocate the queue to communicate with parent
allocateParentQueues(qparent_);
workAtp_ = allocateAtp(hdfsScanTdb.workCriDesc_, space);
// fixup expressions
if (selectPred())
selectPred()->fixup(0, getExpressionMode(), this, space, heap, FALSE, glob);
if (moveExpr())
moveExpr()->fixup(0, getExpressionMode(), this, space, heap, FALSE, glob);
if (convertExpr())
convertExpr()->fixup(0, getExpressionMode(), this, space, heap, FALSE, glob);
if (moveColsConvertExpr())
moveColsConvertExpr()->fixup(0, getExpressionMode(), this, space, heap, FALSE, glob);
// Register subtasks with the scheduler
registerSubtasks();
registerResizeSubtasks();
Lng32 fileNum = getGlobals()->castToExExeStmtGlobals()->getMyInstanceNumber();
ExHbaseAccessTcb::buildLoggingPath(((ExHdfsScanTdb &)hdfsScanTdb).getLoggingLocation(),
(char *)((ExHdfsScanTdb &)hdfsScanTdb).getErrCountRowId(),
((ExHdfsScanTdb &)hdfsScanTdb).tableName(),
"hive_scan_err",
fileNum,
loggingFileName_);
LoggingFileCreated_ = FALSE;
// should be moved to the work method
int jniDebugPort = 0;
int jniDebugTimeout = 0;
ehi_ = ExpHbaseInterface::newInstance(glob->getDefaultHeap(),
(char*)"", //Later replace with server cqd
(char*)"", ////Later replace with port cqd
jniDebugPort,
jniDebugTimeout);
}
ExHdfsScanTcb::~ExHdfsScanTcb()
{
freeResources();
}
void ExHdfsScanTcb::freeResources()
{
if (workAtp_)
{
workAtp_->release();
deallocateAtp(workAtp_, getSpace());
workAtp_ = NULL;
}
if (hdfsScanBuffer_)
{
NADELETEBASIC(hdfsScanBuffer_, getSpace());
hdfsScanBuffer_ = NULL;
}
if (hdfsAsciiSourceBuffer_)
{
NADELETEBASIC(hdfsAsciiSourceBuffer_, getSpace());
hdfsAsciiSourceBuffer_ = NULL;
}
// hdfsSqlTupp_.release() ; // ???
if (hdfsSqlBuffer_)
{
delete hdfsSqlBuffer_;
hdfsSqlBuffer_ = NULL;
}
if (moveExprColsBuffer_)
{
delete moveExprColsBuffer_;
moveExprColsBuffer_ = NULL;
}
if (pool_)
{
delete pool_;
pool_ = NULL;
}
if (qparent_.up)
{
delete qparent_.up;
qparent_.up = NULL;
}
if (qparent_.down)
{
delete qparent_.down;
qparent_.down = NULL;
}
ExpLOBinterfaceCleanup
(lobGlob_, getGlobals()->getDefaultHeap());
}
NABoolean ExHdfsScanTcb::needStatsEntry()
{
// stats are collected for ALL and OPERATOR options.
if ((getGlobals()->getStatsArea()->getCollectStatsType() ==
ComTdb::ALL_STATS) ||
(getGlobals()->getStatsArea()->getCollectStatsType() ==
ComTdb::OPERATOR_STATS))
return TRUE;
else if ( getGlobals()->getStatsArea()->getCollectStatsType() == ComTdb::PERTABLE_STATS)
return TRUE;
else
return FALSE;
}
ExOperStats * ExHdfsScanTcb::doAllocateStatsEntry(
CollHeap *heap,
ComTdb *tdb)
{
ExOperStats * stats = NULL;
ExHdfsScanTdb * myTdb = (ExHdfsScanTdb*) tdb;
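// NOTE: the unconditional return below short-circuits the stats-type dispatch
// that follows, so the OPERATOR_STATS / PERTABLE_STATS branches further down
// are currently unreachable.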
return new(heap) ExHdfsScanStats(heap,
this,
tdb);
ComTdb::CollectStatsType statsType =
getGlobals()->getStatsArea()->getCollectStatsType();
if (statsType == ComTdb::OPERATOR_STATS)
{
return ex_tcb::doAllocateStatsEntry(heap, tdb);
}
else if (statsType == ComTdb::PERTABLE_STATS)
{
// sqlmp style per-table stats, one entry per table
stats = new(heap) ExPertableStats(heap,
this,
tdb);
((ExOperStatsId*)(stats->getId()))->tdbId_ = tdb->getPertableStatsTdbId();
return stats;
}
else
{
ExHdfsScanTdb * myTdb = (ExHdfsScanTdb*) tdb;
return new(heap) ExHdfsScanStats(heap,
this,
tdb);
}
}
void ExHdfsScanTcb::registerSubtasks()
{
ExScheduler *sched = getGlobals()->getScheduler();
sched->registerInsertSubtask(sWork, this, qparent_.down,"PD");
sched->registerUnblockSubtask(sWork, this, qparent_.up, "PU");
sched->registerCancelSubtask(sWork, this, qparent_.down,"CN");
}
ex_tcb_private_state *ExHdfsScanTcb::allocatePstates(
Lng32 &numElems, // inout, desired/actual elements
Lng32 &pstateLength) // out, length of one element
{
PstateAllocator<ex_tcb_private_state> pa;
return pa.allocatePstates(this, numElems, pstateLength);
}
Int32 ExHdfsScanTcb::fixup()
{
lobGlob_ = NULL;
ExpLOBinterfaceInit
(lobGlob_, getGlobals()->getDefaultHeap(),TRUE);
return 0;
}
void brkpoint()
{}
short ExHdfsScanTcb::setupError(Lng32 exeError, Lng32 retcode,
const char * str, const char * str2, const char * str3)
{
// Make sure retcode is positive.
if (retcode < 0)
retcode = -retcode;
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
Lng32 intParam1 = retcode;
Lng32 intParam2 = 0;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(exeError), NULL, &intParam1,
&intParam2, NULL,
(str ? (char*)str : (char*)" "),
(str2 ? (char*)str2 : (char*)" "),
(str3 ? (char*)str3 : (char*)" "));
pentry_down->setDiagsArea(diagsArea);
return -1;
}
ExWorkProcRetcode ExHdfsScanTcb::work()
{
Lng32 retcode = 0;
SFR_RetCode sfrRetCode = SFR_OK;
char *errorDesc = NULL;
char cursorId[8];
HdfsFileInfo *hdfo = NULL;
Lng32 openType = 0;
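// Per-request state machine driven by step_:
// NOT_STARTED -> INIT_HDFS_CURSOR -> OPEN_HDFS_CURSOR -> GET_HDFS_DATA ->
// PROCESS_HDFS_ROW -> RETURN_ROW (looping back per row) or REPOS_HDFS_DATA ->
// CLOSE_HDFS_CURSOR -> CLOSE_FILE -> DONE, with HANDLE_ERROR /
// HANDLE_ERROR_WITH_CLOSE / HANDLE_EXCEPTION entered when something goes wrong.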
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
hdfsStats_ = NULL;
if (getStatsEntry())
hdfsStats_ = getStatsEntry()->castToExHdfsScanStats();
ex_assert(hdfsStats_, "hdfs stats cannot be null");
if (hdfsStats_)
hdfsStats_->init();
beginRangeNum_ = -1;
numRanges_ = -1;
hdfsOffset_ = 0;
checkRangeDelimiter_ = FALSE;
if (hdfsScanTdb().getHdfsFileInfoList()->isEmpty())
{
step_ = DONE;
break;
}
myInstNum_ = getGlobals()->getMyInstanceNumber();
beginRangeNum_ =
*(Lng32*)hdfsScanTdb().getHdfsFileRangeBeginList()->get(myInstNum_);
numRanges_ =
*(Lng32*)hdfsScanTdb().getHdfsFileRangeNumList()->get(myInstNum_);
currRangeNum_ = beginRangeNum_;
hdfsScanBufMaxSize_ = hdfsScanTdb().hdfsBufSize_;
if (numRanges_ > 0)
step_ = INIT_HDFS_CURSOR;
else
step_ = DONE;
}
break;
case INIT_HDFS_CURSOR:
{
hdfo_ = (HdfsFileInfo*)
hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_);
if ((hdfo_->getBytesToRead() == 0) &&
(beginRangeNum_ == currRangeNum_) && (numRanges_ > 1))
{
// skip the first range if it has 0 bytes to read
// doing this for subsequent ranges is more complex
// since the file may need to be closed. The first
// range being 0 is common with sqoop-generated files
currRangeNum_++;
hdfo_ = (HdfsFileInfo*)
hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_);
}
hdfsOffset_ = hdfo_->getStartOffset();
bytesLeft_ = hdfo_->getBytesToRead();
hdfsFileName_ = hdfo_->fileName();
sprintf(cursorId_, "%d", currRangeNum_);
stopOffset_ = hdfsOffset_ + hdfo_->getBytesToRead();
step_ = OPEN_HDFS_CURSOR;
}
break;
case OPEN_HDFS_CURSOR:
{
retcode = 0;
if (isSequenceFile() && !sequenceFileReader_)
{
sequenceFileReader_ = new(getSpace())
SequenceFileReader((NAHeap *)getSpace());
sfrRetCode = sequenceFileReader_->init();
if (sfrRetCode != JNI_OK)
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea, (ExeErrorCode)(8447), NULL,
NULL, NULL, NULL, sequenceFileReader_->getErrorText(sfrRetCode), NULL);
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
if (isSequenceFile())
{
sfrRetCode = sequenceFileReader_->open(hdfsFileName_);
if (sfrRetCode == JNI_OK)
{
// Seek to start offset
sfrRetCode = sequenceFileReader_->seeknSync(hdfsOffset_);
}
if (sfrRetCode != JNI_OK)
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea, (ExeErrorCode)(8447), NULL,
NULL, NULL, NULL, sequenceFileReader_->getErrorText(sfrRetCode), NULL);
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
else
{
openType = 2; // must open
retcode = ExpLOBInterfaceSelectCursor
(lobGlob_,
hdfsFileName_, //hdfsScanTdb().hdfsFileName_,
NULL, //(char*)"",
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_,
0, NULL, // handle not valid for non lob access
bytesLeft_, // max bytes
cursorId_,
requestTag_, Lob_Memory,
0, // not check status
(NOT hdfsScanTdb().hdfsPrefetch()), //1, // waited op
hdfsOffset_,
hdfsScanBufMaxSize_,
bytesRead_,
NULL,
1, // open
openType //
);
// preopen next range.
if ( (currRangeNum_ + 1) < (beginRangeNum_ + numRanges_) )
{
hdfo = (HdfsFileInfo*)
hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_ + 1);
hdfsFileName_ = hdfo->fileName();
sprintf(cursorId, "%d", currRangeNum_ + 1);
openType = 1; // preOpen
retcode = ExpLOBInterfaceSelectCursor
(lobGlob_,
hdfsFileName_, //hdfsScanTdb().hdfsFileName_,
NULL, //(char*)"",
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_,
0, NULL,//handle not relevant for non lob access
hdfo->getBytesToRead(), // max bytes
cursorId,
requestTag_, Lob_Memory,
0, // not check status
(NOT hdfsScanTdb().hdfsPrefetch()), //1, // waited op
hdfo->getStartOffset(),
hdfsScanBufMaxSize_,
bytesRead_,
NULL,
1,// open
openType
);
hdfsFileName_ = hdfo_->fileName();
}
}
if (retcode < 0)
{
Lng32 cliError = 0;
Lng32 intParam1 = -retcode;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE), NULL,
&intParam1,
&cliError,
NULL,
"HDFS",
(char*)"ExpLOBInterfaceSelectCursor/open",
getLobErrStr(intParam1));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
trailingPrevRead_ = 0;
firstBufOfFile_ = true;
numBytesProcessedInRange_ = 0;
step_ = GET_HDFS_DATA;
}
break;
case GET_HDFS_DATA:
{
Int64 bytesToRead = hdfsScanBufMaxSize_ - trailingPrevRead_;
ex_assert(bytesToRead >= 0, "bytesToRead less than zero.");
if (hdfo_->fileIsSplitEnd() && !isSequenceFile())
{
if (bytesLeft_ > 0)
bytesToRead = min(bytesToRead,
(bytesLeft_ + hdfsScanTdb().rangeTailIOSize_));
else
bytesToRead = hdfsScanTdb().rangeTailIOSize_;
}
else
{
ex_assert(bytesLeft_ >= 0, "Bad assumption at e-o-f");
if (bytesToRead > bytesLeft_ +
1 // plus one for end-of-range files with no
// record delimiter at eof.
)
bytesToRead = bytesLeft_ + 1;
}
ex_assert(bytesToRead + trailingPrevRead_ <= hdfsScanBufMaxSize_,
"too many bites.");
if (hdfsStats_)
hdfsStats_->getHdfsTimer().start();
retcode = 0;
if (isSequenceFile())
{
sfrRetCode = sequenceFileReader_->fetchRowsIntoBuffer(stopOffset_,
hdfsScanBuffer_,
hdfsScanBufMaxSize_, //bytesToRead,
bytesRead_,
hdfsScanTdb().recordDelimiter_);
if (sfrRetCode != JNI_OK && sfrRetCode != SFR_NOMORE)
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea, (ExeErrorCode)(8447), NULL,
NULL, NULL, NULL, sequenceFileReader_->getErrorText(sfrRetCode), NULL);
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
else
{
seqScanAgain_ = (sfrRetCode != SFR_NOMORE);
}
}
else
{
retcode = ExpLOBInterfaceSelectCursor
(lobGlob_,
hdfsFileName_,
NULL,
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_,
0, NULL,
0, cursorId_,
requestTag_, Lob_Memory,
0, // not check status
(NOT hdfsScanTdb().hdfsPrefetch()), //1, // waited op
hdfsOffset_,
bytesToRead,
bytesRead_,
hdfsScanBuffer_ + trailingPrevRead_,
2, // read
0 // openType, not applicable for read
);
if (hdfsStats_)
hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
if (retcode < 0)
{
Lng32 cliError = 0;
Lng32 intParam1 = -retcode;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE), NULL,
&intParam1,
&cliError,
NULL,
"HDFS",
(char*)"ExpLOBInterfaceSelectCursor/read",
getLobErrStr(intParam1));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
}
if (bytesRead_ <= 0)
{
// Finished with this file. Unexpected? Warning/event?
step_ = CLOSE_HDFS_CURSOR;
break;
}
else
{
char * lastByteRead = hdfsScanBuffer_ +
trailingPrevRead_ + bytesRead_ - 1;
if ((bytesRead_ < bytesToRead) &&
(*lastByteRead != hdfsScanTdb().recordDelimiter_))
{
// Some files end without a record delimiter but
// hive treats the end-of-file as a record delimiter.
lastByteRead[1] = hdfsScanTdb().recordDelimiter_;
bytesRead_++;
}
if (bytesRead_ > bytesLeft_)
{
if (isSequenceFile())
endOfRequestedRange_ = hdfsScanBuffer_ + bytesRead_;
else
endOfRequestedRange_ = hdfsScanBuffer_ +
trailingPrevRead_ + bytesLeft_;
}
else
endOfRequestedRange_ = NULL;
if (isSequenceFile())
{
// If the file is compressed, we don't know the real value
// of bytesLeft_, but it doesn't really matter.
if (seqScanAgain_ == false)
bytesLeft_ = 0;
}
else
bytesLeft_ -= bytesRead_;
}
if (hdfsStats_)
hdfsStats_->incBytesRead(bytesRead_);
if (firstBufOfFile_ && hdfo_->fileIsSplitBegin() && !isSequenceFile())
{
// Position in the hdfsScanBuffer_ to the
// first record delimiter.
hdfsBufNextRow_ = hdfs_strchr(hdfsScanBuffer_,
hdfsScanTdb().recordDelimiter_, hdfsScanBuffer_+trailingPrevRead_+ bytesRead_, checkRangeDelimiter_);
// May be that the record is too long? Or data isn't ascii?
// Or delimiter is incorrect.
if (! hdfsBufNextRow_)
{
ComDiagsArea *diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(8446), NULL,
NULL, NULL, NULL,
(char*)"No record delimiter found in buffer from hdfsRead.",
NULL);
// no need to log errors in this case (bulk load) since this is a major issue
// and needs to be corrected
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
hdfsBufNextRow_ += 1; // point past record delimiter.
}
else
hdfsBufNextRow_ = hdfsScanBuffer_;
debugPrevRow_ = hdfsScanBuffer_; // By convention, at
// beginning of scan, the
// prev is set to next.
debugtrailingPrevRead_ = 0;
debugPenultimatePrevRow_ = NULL;
firstBufOfFile_ = false;
hdfsOffset_ += bytesRead_;
step_ = PROCESS_HDFS_ROW;
}
break;
case PROCESS_HDFS_ROW:
{
exception_ = FALSE;
nextStep_ = NOT_STARTED;
debugPenultimatePrevRow_ = debugPrevRow_;
debugPrevRow_ = hdfsBufNextRow_;
int formattedRowLength = 0;
ComDiagsArea *transformDiags = NULL;
int err = 0;
char *startOfNextRow =
extractAndTransformAsciiSourceToSqlRow(err, transformDiags);
bool rowWillBeSelected = true;
lastErrorCnd_ = NULL;
if(err)
{
if (hdfsScanTdb().continueOnError())
{
Lng32 errorCount = workAtp_->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount>0)
lastErrorCnd_ = workAtp_->getDiagsArea()->getErrorEntry(errorCount);
exception_ = TRUE;
rowWillBeSelected = false;
}
else
{
if (transformDiags)
pentry_down->setDiagsArea(transformDiags);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
}
if (startOfNextRow == NULL)
{
step_ = REPOS_HDFS_DATA;
if (!exception_)
break;
}
else
{
numBytesProcessedInRange_ +=
startOfNextRow - hdfsBufNextRow_;
hdfsBufNextRow_ = startOfNextRow;
}
if (exception_)
{
nextStep_ = step_;
step_ = HANDLE_EXCEPTION;
break;
}
if (hdfsStats_)
hdfsStats_->incAccessedRows();
workAtp_->getTupp(hdfsScanTdb().workAtpIndex_) =
hdfsSqlTupp_;
if ((rowWillBeSelected) && (selectPred()))
{
ex_expr::exp_return_type evalRetCode =
selectPred()->eval(pentry_down->getAtp(), workAtp_);
if (evalRetCode == ex_expr::EXPR_FALSE)
rowWillBeSelected = false;
else if (evalRetCode == ex_expr::EXPR_ERROR)
{
if (hdfsScanTdb().continueOnError())
{
if (pentry_down->getDiagsArea() || workAtp_->getDiagsArea())
{
Lng32 errorCount = 0;
if (pentry_down->getDiagsArea())
{
errorCount = pentry_down->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = pentry_down->getDiagsArea()->getErrorEntry(errorCount);
}
else
{
errorCount = workAtp_->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = workAtp_->getDiagsArea()->getErrorEntry(errorCount);
}
}
exception_ = TRUE;
nextStep_ = step_;
step_ = HANDLE_EXCEPTION;
rowWillBeSelected = false;
break;
}
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
else
ex_assert(evalRetCode == ex_expr::EXPR_TRUE,
"invalid return code from expr eval");
}
if (rowWillBeSelected)
{
if (moveColsConvertExpr())
{
ex_expr::exp_return_type evalRetCode =
moveColsConvertExpr()->eval(workAtp_, workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
if (hdfsScanTdb().continueOnError())
{
if ( workAtp_->getDiagsArea())
{
Lng32 errorCount = 0;
errorCount = workAtp_->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = workAtp_->getDiagsArea()->getErrorEntry(errorCount);
}
exception_ = TRUE;
nextStep_ = step_;
step_ = HANDLE_EXCEPTION;
break;
}
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
}
if (hdfsStats_)
hdfsStats_->incUsedRows();
step_ = RETURN_ROW;
break;
}
break;
}
case RETURN_ROW:
{
if (qparent_.up->isFull())
return WORK_OK;
lastErrorCnd_ = NULL;
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
queue_index saveParentIndex = up_entry->upState.parentIndex;
queue_index saveDownIndex = up_entry->upState.downIndex;
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
up_entry->upState.status = ex_queue::Q_OK_MMORE;
if (moveExpr())
{
UInt32 maxRowLen = hdfsScanTdb().outputRowLength_;
UInt32 rowLen = maxRowLen;
if (hdfsScanTdb().useCifDefrag() &&
!pool_->currentBufferHasEnoughSpace((Lng32)hdfsScanTdb().outputRowLength_))
{
up_entry->getTupp(hdfsScanTdb().tuppIndex_) = defragTd_;
defragTd_->setReferenceCount(1);
ex_expr::exp_return_type evalRetCode =
moveExpr()->eval(up_entry->getAtp(), workAtp_,0,-1,&rowLen);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
if (hdfsScanTdb().continueOnError())
{
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == matches_))
step_ = CLOSE_HDFS_CURSOR;
else
step_ = PROCESS_HDFS_ROW;
up_entry->upState.parentIndex =saveParentIndex ;
up_entry->upState.downIndex = saveDownIndex ;
if (up_entry->getDiagsArea() || workAtp_->getDiagsArea())
{
Lng32 errorCount = 0;
if (up_entry->getDiagsArea())
{
errorCount = up_entry->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = up_entry->getDiagsArea()->getErrorEntry(errorCount);
}
else
{
errorCount = workAtp_->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = workAtp_->getDiagsArea()->getErrorEntry(errorCount);
}
}
exception_ = TRUE;
nextStep_ = step_;
step_ = HANDLE_EXCEPTION;
break;
}
else
{
// Get diags from up_entry onto pentry_down, which
// is where the HANDLE_ERROR step expects it.
ComDiagsArea *diagsArea = pentry_down->getDiagsArea();
if (diagsArea == NULL)
{
diagsArea =
ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
pentry_down->setDiagsArea (diagsArea);
}
pentry_down->getDiagsArea()->
mergeAfter(*up_entry->getDiagsArea());
up_entry->setDiagsArea(NULL);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
if (pool_->get_free_tuple(
up_entry->getTupp(hdfsScanTdb().tuppIndex_),
rowLen))
return WORK_POOL_BLOCKED;
str_cpy_all(up_entry->getTupp(hdfsScanTdb().tuppIndex_).getDataPointer(),
defragTd_->getTupleAddress(),
rowLen);
}
}
else
{
if (pool_->get_free_tuple(
up_entry->getTupp(hdfsScanTdb().tuppIndex_),
(Lng32)hdfsScanTdb().outputRowLength_))
return WORK_POOL_BLOCKED;
ex_expr::exp_return_type evalRetCode =
moveExpr()->eval(up_entry->getAtp(), workAtp_,0,-1,&rowLen);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
if (hdfsScanTdb().continueOnError())
{
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == matches_))
step_ = CLOSE_FILE;
else
step_ = PROCESS_HDFS_ROW;
if (up_entry->getDiagsArea() || workAtp_->getDiagsArea())
{
Lng32 errorCount = 0;
if (up_entry->getDiagsArea())
{
errorCount = up_entry->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = up_entry->getDiagsArea()->getErrorEntry(errorCount);
}
else
{
errorCount = workAtp_->getDiagsArea()->getNumber(DgSqlCode::ERROR_);
if (errorCount > 0)
lastErrorCnd_ = workAtp_->getDiagsArea()->getErrorEntry(errorCount);
}
}
up_entry->upState.parentIndex =saveParentIndex ;
up_entry->upState.downIndex = saveDownIndex ;
exception_ = TRUE;
nextStep_ = step_;
step_ = HANDLE_EXCEPTION;
break;
}
else
{
// Get diags from up_entry onto pentry_down, which
// is where the HANDLE_ERROR step expects it.
ComDiagsArea *diagsArea = pentry_down->getDiagsArea();
if (diagsArea == NULL)
{
diagsArea =
ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
pentry_down->setDiagsArea (diagsArea);
}
pentry_down->getDiagsArea()->
mergeAfter(*up_entry->getDiagsArea());
up_entry->setDiagsArea(NULL);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
}
if (hdfsScanTdb().useCif() && rowLen != maxRowLen)
{
pool_->resizeLastTuple(rowLen,
up_entry->getTupp(hdfsScanTdb().tuppIndex_).getDataPointer());
}
}
}
up_entry->upState.setMatchNo(++matches_);
if (matches_ == matchBrkPoint_)
brkpoint();
qparent_.up->insert();
// use ExOperStats now, to cover OPERATOR stats as well as
// ALL stats.
if (getStatsEntry())
getStatsEntry()->incActualRowsReturned();
workAtp_->setDiagsArea(NULL); // get rid of warnings.
if (((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == matches_)) ||
(pentry_down->downState.request == ex_queue::GET_NOMORE))
step_ = CLOSE_HDFS_CURSOR;
else
step_ = PROCESS_HDFS_ROW;
break;
}
case REPOS_HDFS_DATA:
{
bool scanAgain = false;
if (isSequenceFile())
scanAgain = seqScanAgain_;
else
{
if (hdfo_->fileIsSplitEnd())
{
if (numBytesProcessedInRange_ < hdfo_->getBytesToRead())
scanAgain = true;
}
else
if (bytesLeft_ > 0)
scanAgain = true;
}
if (scanAgain)
{
// Get ready for another gulp of hdfs data.
debugtrailingPrevRead_ = trailingPrevRead_;
trailingPrevRead_ = bytesRead_ -
(hdfsBufNextRow_ -
(hdfsScanBuffer_ + trailingPrevRead_));
// Move trailing data from the end of buffer to the front.
// The GET_HDFS_DATA step will use trailingPrevRead_ to
// adjust the read buffer ptr so that the next read happens
// contiguously to the final byte of the prev read. It will
// also use trailingPrevRead_ to adjust the size of
// the next read so that fixed size buffer is not overrun.
// Finally, trailingPrevRead_ is used in the
// extractSourceFields method to keep from processing
// bytes left in the buffer from the previous read.
if ((trailingPrevRead_ > 0) &&
(hdfsBufNextRow_[0] == RANGE_DELIMITER))
{
checkRangeDelimiter_ = FALSE;
step_ = CLOSE_HDFS_CURSOR;
break;
}
memmove(hdfsScanBuffer_, hdfsBufNextRow_,
(size_t)trailingPrevRead_);
step_ = GET_HDFS_DATA;
}
else
{
trailingPrevRead_ = 0;
step_ = CLOSE_HDFS_CURSOR;
}
break;
}
case CLOSE_HDFS_CURSOR:
{
retcode = 0;
if (isSequenceFile())
{
sfrRetCode = sequenceFileReader_->close();
if (sfrRetCode != JNI_OK)
{
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea, (ExeErrorCode)(8447), NULL,
NULL, NULL, NULL, sequenceFileReader_->getErrorText(sfrRetCode), NULL);
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
else
{
retcode = ExpLOBInterfaceSelectCursor
(lobGlob_,
hdfsFileName_,
NULL,
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_,
0,NULL, //handle not relevant for non lob access
0, cursorId_,
requestTag_, Lob_Memory,
0, // not check status
(NOT hdfsScanTdb().hdfsPrefetch()), //1, // waited op
0,
hdfsScanBufMaxSize_,
bytesRead_,
hdfsScanBuffer_,
3, // close
0); // openType, not applicable for close
if (retcode < 0)
{
Lng32 cliError = 0;
Lng32 intParam1 = -retcode;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE), NULL,
&intParam1,
&cliError,
NULL,
"HDFS",
(char*)"ExpLOBInterfaceSelectCursor/close",
getLobErrStr(intParam1));
pentry_down->setDiagsArea(diagsArea);
step_ = HANDLE_ERROR;
break;
}
}
step_ = CLOSE_FILE;
}
break;
case HANDLE_EXCEPTION:
{
step_ = nextStep_;
exception_ = FALSE;
if (hdfsScanTdb().getMaxErrorRows() > 0)
{
Int64 exceptionCount = 0;
ExHbaseAccessTcb::incrErrorCount( ehi_,exceptionCount,
hdfsScanTdb().getErrCountTable(),hdfsScanTdb().getErrCountRowId());
if (exceptionCount > hdfsScanTdb().getMaxErrorRows())
{
if (pentry_down->getDiagsArea())
pentry_down->getDiagsArea()->clear();
if (workAtp_->getDiagsArea())
workAtp_->getDiagsArea()->clear();
ComDiagsArea *da = workAtp_->getDiagsArea();
if(!da)
{
da = ComDiagsArea::allocate(getHeap());
workAtp_->setDiagsArea(da);
}
*da << DgSqlCode(-EXE_MAX_ERROR_ROWS_EXCEEDED);
step_ = HANDLE_ERROR_WITH_CLOSE;
break;
}
}
if (hdfsScanTdb().getLogErrorRows())
{
int loggingRowLen = hdfsLoggingRowEnd_ - hdfsLoggingRow_ +1;
ExHbaseAccessTcb::handleException((NAHeap *)getHeap(), hdfsLoggingRow_,
loggingRowLen, lastErrorCnd_,
ehi_,
LoggingFileCreated_,
loggingFileName_);
}
if (pentry_down->getDiagsArea())
pentry_down->getDiagsArea()->clear();
if (workAtp_->getDiagsArea())
workAtp_->getDiagsArea()->clear();
}
break;
case HANDLE_ERROR_WITH_CLOSE:
case HANDLE_ERROR:
{
if (qparent_.up->isFull())
return WORK_OK;
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
if (workAtp_->getDiagsArea())
{
ComDiagsArea *diagsArea = up_entry->getDiagsArea();
if (diagsArea == NULL)
{
diagsArea =
ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
up_entry->setDiagsArea (diagsArea);
}
up_entry->getDiagsArea()->mergeAfter(*workAtp_->getDiagsArea());
workAtp_->setDiagsArea(NULL);
}
up_entry->upState.status = ex_queue::Q_SQLERROR;
qparent_.up->insert();
if (step_ == HANDLE_ERROR_WITH_CLOSE)
step_ = CLOSE_HDFS_CURSOR;
else
step_ = ERROR_CLOSE_FILE;
break;
}
case CLOSE_FILE:
case ERROR_CLOSE_FILE:
{
if (getStatsEntry())
{
ExHdfsScanStats * stats =
getStatsEntry()->castToExHdfsScanStats();
if (stats)
{
ExLobStats s;
s.init();
retcode = ExpLOBinterfaceStats
(lobGlob_,
&s,
hdfsFileName_, //hdfsScanTdb().hdfsFileName_,
NULL, //(char*)"",
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_);
*stats->lobStats() = *stats->lobStats() + s;
}
}
// if the next file is not the same as the current file, close the current file.
bool closeFile = true;
if ( (step_ == CLOSE_FILE) &&
((currRangeNum_ + 1) < (beginRangeNum_ + numRanges_)))
{
hdfo = (HdfsFileInfo*) hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_ + 1);
if (strcmp(hdfsFileName_, hdfo->fileName()) == 0)
closeFile = false;
}
if (closeFile)
{
retcode = ExpLOBinterfaceCloseFile
(lobGlob_,
hdfsFileName_,
NULL,
(Lng32)Lob_External_HDFS_File,
hdfsScanTdb().hostName_,
hdfsScanTdb().port_);
if ((step_ == CLOSE_FILE) &&
(retcode < 0))
{
Lng32 cliError = 0;
Lng32 intParam1 = -retcode;
ComDiagsArea * diagsArea = NULL;
ExRaiseSqlError(getHeap(), &diagsArea,
(ExeErrorCode)(EXE_ERROR_FROM_LOB_INTERFACE), NULL,
&intParam1,
&cliError,
NULL,
"HDFS",
(char*)"ExpLOBinterfaceCloseFile",
getLobErrStr(intParam1));
pentry_down->setDiagsArea(diagsArea);
}
if (ehi_)
retcode = ehi_->hdfsClose();
}
if (step_ == CLOSE_FILE)
{
currRangeNum_++;
if (currRangeNum_ < (beginRangeNum_ + numRanges_)) {
if (((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == matches_)) ||
(pentry_down->downState.request == ex_queue::GET_NOMORE))
step_ = DONE;
else
// move to the next file.
step_ = INIT_HDFS_CURSOR;
break;
}
}
step_ = DONE;
}
break;
case DONE:
{
if (qparent_.up->isFull())
return WORK_OK;
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
up_entry->upState.status = ex_queue::Q_NO_DATA;
up_entry->upState.setMatchNo(matches_);
qparent_.up->insert();
qparent_.down->removeHead();
step_ = NOT_STARTED;
break;
}
default:
{
break;
}
} // switch
} // while
return WORK_OK;
}
char * ExHdfsScanTcb::extractAndTransformAsciiSourceToSqlRow(int &err,
ComDiagsArea* &diagsArea)
{
err = 0;
char *sourceData = hdfsBufNextRow_;
char *sourceRowEnd = NULL;
char *sourceColEnd = NULL;
NABoolean isTrailingMissingColumn = FALSE;
ExpTupleDesc * asciiSourceTD =
hdfsScanTdb().workCriDesc_->getTupleDescriptor(hdfsScanTdb().asciiTuppIndex_);
const char cd = hdfsScanTdb().columnDelimiter_;
const char rd = hdfsScanTdb().recordDelimiter_;
const char *sourceDataEnd = hdfsScanBuffer_+trailingPrevRead_+ bytesRead_;
hdfsLoggingRow_ = hdfsBufNextRow_;
if (asciiSourceTD->numAttrs() == 0)
{
sourceRowEnd = hdfs_strchr(sourceData, rd, sourceDataEnd, checkRangeDelimiter_);
hdfsLoggingRowEnd_ = sourceRowEnd;
if (!sourceRowEnd)
return NULL;
if ((endOfRequestedRange_) &&
(sourceRowEnd >= endOfRequestedRange_)) {
checkRangeDelimiter_ = TRUE;
*(sourceRowEnd +1)= RANGE_DELIMITER;
}
// no columns need to be converted, e.g. count(*) with no predicate
return sourceRowEnd+1;
}
Lng32 neededColIndex = 0;
Attributes * attr = NULL;
NABoolean rdSeen = FALSE;
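// Walk the convert/skip list: for each source column, locate the next column or
// record delimiter; for needed columns, record the value length, the null indicator
// and a pointer to the raw bytes in the ascii source tuple so convertExpr() can
// convert them in place.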
for (Lng32 i = 0; i < hdfsScanTdb().convertSkipListSize_; i++)
{
// all remainin columns wil be skip columns, don't bother
// finding their column delimiters
if (neededColIndex == asciiSourceTD->numAttrs())
continue;
if (hdfsScanTdb().convertSkipList_[i] > 0)
{
attr = asciiSourceTD->getAttr(neededColIndex);
neededColIndex++;
}
else
attr = NULL;
if (!isTrailingMissingColumn) {
sourceColEnd = hdfs_strchr(sourceData, rd, cd, sourceDataEnd, checkRangeDelimiter_, &rdSeen);
if (sourceColEnd == NULL) {
if (rdSeen || (sourceRowEnd == NULL))
return NULL;
else
return sourceRowEnd+1;
}
short len = 0;
len = sourceColEnd - sourceData;
if (rdSeen) {
sourceRowEnd = sourceColEnd;
hdfsLoggingRowEnd_ = sourceRowEnd;
if ((endOfRequestedRange_) &&
(sourceRowEnd >= endOfRequestedRange_)) {
checkRangeDelimiter_ = TRUE;
*(sourceRowEnd +1)= RANGE_DELIMITER;
}
if (i != hdfsScanTdb().convertSkipListSize_ - 1)
isTrailingMissingColumn = TRUE;
}
if (attr) // this is a needed column. We need to convert
{
*(short*)&hdfsAsciiSourceData_[attr->getVCLenIndOffset()] = len;
if (attr->getNullFlag())
{
if (len == 0)
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = -1;
else if (memcmp(sourceData, "\\N", len) == 0)
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = -1;
else
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = 0;
}
if (len > 0)
{
// move address of data into the source operand.
// convertExpr will dereference this addr and get to the actual
// data.
*(Int64*)&hdfsAsciiSourceData_[attr->getOffset()] =
(Int64)sourceData;
}
else
{
*(Int64*)&hdfsAsciiSourceData_[attr->getOffset()] =
(Int64)0;
}
} // if(attr)
} // if (!isTrailingMissingColumn)
else
{
// A delimiter was found, but not enough columns.
// Treat the rest of the columns as NULL.
if (attr && attr->getNullFlag())
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = -1;
}
sourceData = sourceColEnd + 1 ;
}
// It is possible that the above loop exited before the
// record delimiter was encountered, so try to find the
// record delimiter now.
if (sourceRowEnd == NULL) {
sourceRowEnd = hdfs_strchr(sourceData, rd, sourceDataEnd, checkRangeDelimiter_);
if (sourceRowEnd) {
hdfsLoggingRowEnd_ = sourceRowEnd;
if ((endOfRequestedRange_) &&
(sourceRowEnd >= endOfRequestedRange_ )) {
checkRangeDelimiter_ = TRUE;
*(sourceRowEnd +1)= RANGE_DELIMITER;
}
}
}
workAtp_->getTupp(hdfsScanTdb().workAtpIndex_) = hdfsSqlTupp_;
workAtp_->getTupp(hdfsScanTdb().asciiTuppIndex_) = hdfsAsciiSourceTupp_;
// for later
workAtp_->getTupp(hdfsScanTdb().moveExprColsTuppIndex_) = moveExprColsTupp_;
if (convertExpr())
{
ex_expr::exp_return_type evalRetCode =
convertExpr()->eval(workAtp_, workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
err = -1;
else
err = 0;
}
if (sourceRowEnd)
return sourceRowEnd+1;
return NULL;
}
short ExHdfsScanTcb::moveRowToUpQueue(const char * row, Lng32 len,
short * rc, NABoolean isVarchar)
{
if (qparent_.up->isFull())
{
if (rc)
*rc = WORK_OK;
return -1;
}
Lng32 length;
if (len <= 0)
length = strlen(row);
else
length = len;
tupp p;
if (pool_->get_free_tuple(p, (Lng32)
((isVarchar ? SQL_VARCHAR_HDR_SIZE : 0)
+ length)))
{
if (rc)
*rc = WORK_POOL_BLOCKED;
return -1;
}
char * dp = p.getDataPointer();
if (isVarchar)
{
*(short*)dp = (short)length;
str_cpy_all(&dp[SQL_VARCHAR_HDR_SIZE], row, length);
}
else
{
str_cpy_all(dp, row, length);
}
ex_queue_entry * pentry_down = qparent_.down->getHeadEntry();
ex_queue_entry * up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->getAtp()->getTupp((Lng32)hdfsScanTdb().tuppIndex_) = p;
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.setMatchNo(++matches_);
up_entry->upState.status = ex_queue::Q_OK_MMORE;
// insert into parent
qparent_.up->insert();
return 0;
}
short ExHdfsScanTcb::handleError(short &rc)
{
if (qparent_.up->isFull())
{
rc = WORK_OK;
return -1;
}
if (qparent_.up->isFull())
return WORK_OK;
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
up_entry->upState.status = ex_queue::Q_SQLERROR;
qparent_.up->insert();
return 0;
}
short ExHdfsScanTcb::handleDone(ExWorkProcRetcode &rc)
{
if (qparent_.up->isFull())
{
rc = WORK_OK;
return -1;
}
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
up_entry->upState.status = ex_queue::Q_NO_DATA;
up_entry->upState.setMatchNo(matches_);
qparent_.up->insert();
qparent_.down->removeHead();
return 0;
}
////////////////////////////////////////////////////////////////////////
// ORC files
////////////////////////////////////////////////////////////////////////
ExOrcScanTcb::ExOrcScanTcb(
const ComTdbHdfsScan &orcScanTdb,
ex_globals * glob ) :
ExHdfsScanTcb( orcScanTdb, glob),
step_(NOT_STARTED)
{
orci_ = ExpORCinterface::newInstance(glob->getDefaultHeap(),
(char*)orcScanTdb.hostName_,
orcScanTdb.port_);
}
ExOrcScanTcb::~ExOrcScanTcb()
{
}
short ExOrcScanTcb::extractAndTransformOrcSourceToSqlRow(
char * orcRow,
Int64 orcRowLen,
Lng32 numOrcCols,
ComDiagsArea* &diagsArea)
{
short err = 0;
if ((!orcRow) || (orcRowLen <= 0))
return -1;
char *sourceData = orcRow;
ExpTupleDesc * asciiSourceTD =
hdfsScanTdb().workCriDesc_->getTupleDescriptor(hdfsScanTdb().asciiTuppIndex_);
if (asciiSourceTD->numAttrs() == 0)
{
// no columns need to be converted. For e.g. count(*) with no predicate
return 0;
}
Lng32 neededColIndex = 0;
Attributes * attr = NULL;
Lng32 numCurrCols = 0;
Lng32 currColLen;
for (Lng32 i = 0; i < hdfsScanTdb().convertSkipListSize_; i++)
{
if (hdfsScanTdb().convertSkipList_[i] > 0)
{
attr = asciiSourceTD->getAttr(neededColIndex);
neededColIndex++;
}
else
attr = NULL;
currColLen = *(Lng32*)sourceData;
sourceData += sizeof(currColLen);
if (attr) // this is a needed column. We need to convert
{
*(short*)&hdfsAsciiSourceData_[attr->getVCLenIndOffset()] = currColLen;
if (attr->getNullFlag())
{
if (currColLen == 0)
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = -1;
else if (memcmp(sourceData, "\\N", currColLen) == 0)
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = -1;
else
*(short *)&hdfsAsciiSourceData_[attr->getNullIndOffset()] = 0;
}
if (currColLen > 0)
{
// move address of data into the source operand.
// convertExpr will dereference this addr and get to the actual
// data.
*(Int64*)&hdfsAsciiSourceData_[attr->getOffset()] =
(Int64)sourceData;
}
} // if(attr)
numCurrCols++;
sourceData += currColLen;
}
if (numCurrCols != numOrcCols)
{
return -1;
}
workAtp_->getTupp(hdfsScanTdb().workAtpIndex_) = hdfsSqlTupp_;
workAtp_->getTupp(hdfsScanTdb().asciiTuppIndex_) = hdfsAsciiSourceTupp_;
// for later
workAtp_->getTupp(hdfsScanTdb().moveExprColsTuppIndex_) = moveExprColsTupp_;
err = 0;
if (convertExpr())
{
ex_expr::exp_return_type evalRetCode =
convertExpr()->eval(workAtp_, workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
err = -1;
else
err = 0;
}
return err;
}
ExWorkProcRetcode ExOrcScanTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
if (pentry_down->downState.request == ex_queue::GET_NOMORE)
step_ = DONE;
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
hdfsStats_ = NULL;
if (getStatsEntry())
hdfsStats_ = getStatsEntry()->castToExHdfsScanStats();
ex_assert(hdfsStats_, "hdfs stats cannot be null");
if (hdfsStats_)
hdfsStats_->init();
beginRangeNum_ = -1;
numRanges_ = -1;
if (hdfsScanTdb().getHdfsFileInfoList()->isEmpty())
{
step_ = DONE;
break;
}
myInstNum_ = getGlobals()->getMyInstanceNumber();
beginRangeNum_ =
*(Lng32*)hdfsScanTdb().getHdfsFileRangeBeginList()->get(myInstNum_);
numRanges_ =
*(Lng32*)hdfsScanTdb().getHdfsFileRangeNumList()->get(myInstNum_);
currRangeNum_ = beginRangeNum_;
if (numRanges_ > 0)
step_ = INIT_ORC_CURSOR;
else
step_ = DONE;
}
break;
case INIT_ORC_CURSOR:
{
/* orci_ = ExpORCinterface::newInstance(getHeap(),
(char*)hdfsScanTdb().hostName_,
*/
hdfo_ = (HdfsFileInfo*)
hdfsScanTdb().getHdfsFileInfoList()->get(currRangeNum_);
orcStartRowNum_ = hdfo_->getStartRow();
orcNumRows_ = hdfo_->getNumRows();
hdfsFileName_ = hdfo_->fileName();
sprintf(cursorId_, "%d", currRangeNum_);
if (orcNumRows_ == -1) // select all rows
orcStopRowNum_ = -1;
else
orcStopRowNum_ = orcStartRowNum_ + orcNumRows_ - 1;
step_ = OPEN_ORC_CURSOR;
}
break;
case OPEN_ORC_CURSOR:
{
retcode = orci_->scanOpen(hdfsFileName_,
orcStartRowNum_, orcStopRowNum_);
if (retcode < 0)
{
setupError(EXE_ERROR_FROM_LOB_INTERFACE, retcode, "ORC", "scanOpen",
orci_->getErrorText(-retcode));
step_ = HANDLE_ERROR;
break;
}
step_ = GET_ORC_ROW;
}
break;
case GET_ORC_ROW:
{
orcRow_ = hdfsScanBuffer_;
orcRowLen_ = hdfsScanTdb().hdfsBufSize_;
retcode = orci_->scanFetch(orcRow_, orcRowLen_, orcRowNum_,
numOrcCols_);
if (retcode < 0)
{
setupError(EXE_ERROR_FROM_LOB_INTERFACE, retcode, "ORC", "scanFetch",
orci_->getErrorText(-retcode));
step_ = HANDLE_ERROR;
break;
}
if (retcode == 100)
{
step_ = CLOSE_ORC_CURSOR;
break;
}
step_ = PROCESS_ORC_ROW;
}
break;
case PROCESS_ORC_ROW:
{
int formattedRowLength = 0;
ComDiagsArea *transformDiags = NULL;
short err =
extractAndTransformOrcSourceToSqlRow(orcRow_, orcRowLen_,
numOrcCols_, transformDiags);
if (err)
{
if (transformDiags)
pentry_down->setDiagsArea(transformDiags);
step_ = HANDLE_ERROR;
break;
}
if (hdfsStats_)
hdfsStats_->incAccessedRows();
workAtp_->getTupp(hdfsScanTdb().workAtpIndex_) =
hdfsSqlTupp_;
bool rowWillBeSelected = true;
if (selectPred())
{
ex_expr::exp_return_type evalRetCode =
selectPred()->eval(pentry_down->getAtp(), workAtp_);
if (evalRetCode == ex_expr::EXPR_FALSE)
rowWillBeSelected = false;
else if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
else
ex_assert(evalRetCode == ex_expr::EXPR_TRUE,
"invalid return code from expr eval");
}
if (rowWillBeSelected)
{
if (moveColsConvertExpr())
{
ex_expr::exp_return_type evalRetCode =
moveColsConvertExpr()->eval(workAtp_, workAtp_);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
step_ = HANDLE_ERROR;
break;
}
}
if (hdfsStats_)
hdfsStats_->incUsedRows();
step_ = RETURN_ROW;
break;
}
step_ = GET_ORC_ROW;
}
break;
case RETURN_ROW:
{
if (qparent_.up->isFull())
return WORK_OK;
ex_queue_entry *up_entry = qparent_.up->getTailEntry();
up_entry->copyAtp(pentry_down);
up_entry->upState.parentIndex =
pentry_down->downState.parentIndex;
up_entry->upState.downIndex = qparent_.down->getHeadIndex();
up_entry->upState.status = ex_queue::Q_OK_MMORE;
if (moveExpr())
{
UInt32 maxRowLen = hdfsScanTdb().outputRowLength_;
UInt32 rowLen = maxRowLen;
if (hdfsScanTdb().useCifDefrag() &&
!pool_->currentBufferHasEnoughSpace((Lng32)hdfsScanTdb().outputRowLength_))
{
up_entry->getTupp(hdfsScanTdb().tuppIndex_) = defragTd_;
defragTd_->setReferenceCount(1);
ex_expr::exp_return_type evalRetCode =
moveExpr()->eval(up_entry->getAtp(), workAtp_,0,-1,&rowLen);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
// Get diags from up_entry onto pentry_down, which
// is where the HANDLE_ERROR step expects it.
ComDiagsArea *diagsArea = pentry_down->getDiagsArea();
if (diagsArea == NULL)
{
diagsArea =
ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
pentry_down->setDiagsArea (diagsArea);
}
pentry_down->getDiagsArea()->
mergeAfter(*up_entry->getDiagsArea());
up_entry->setDiagsArea(NULL);
step_ = HANDLE_ERROR;
break;
}
if (pool_->get_free_tuple(
up_entry->getTupp(hdfsScanTdb().tuppIndex_),
rowLen))
return WORK_POOL_BLOCKED;
str_cpy_all(up_entry->getTupp(hdfsScanTdb().tuppIndex_).getDataPointer(),
defragTd_->getTupleAddress(),
rowLen);
}
else
{
if (pool_->get_free_tuple(
up_entry->getTupp(hdfsScanTdb().tuppIndex_),
(Lng32)hdfsScanTdb().outputRowLength_))
return WORK_POOL_BLOCKED;
ex_expr::exp_return_type evalRetCode =
moveExpr()->eval(up_entry->getAtp(), workAtp_,0,-1,&rowLen);
if (evalRetCode == ex_expr::EXPR_ERROR)
{
// Get diags from up_entry onto pentry_down, which
// is where the HANDLE_ERROR step expects it.
ComDiagsArea *diagsArea = pentry_down->getDiagsArea();
if (diagsArea == NULL)
{
diagsArea =
ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
pentry_down->setDiagsArea (diagsArea);
}
pentry_down->getDiagsArea()->
mergeAfter(*up_entry->getDiagsArea());
up_entry->setDiagsArea(NULL);
step_ = HANDLE_ERROR;
break;
}
if (hdfsScanTdb().useCif() && rowLen != maxRowLen)
{
pool_->resizeLastTuple(rowLen,
up_entry->getTupp(hdfsScanTdb().tuppIndex_).getDataPointer());
}
}
}
up_entry->upState.setMatchNo(++matches_);
if (matches_ == matchBrkPoint_)
brkpoint();
qparent_.up->insert();
// use ExOperStats now, to cover OPERATOR stats as well as
// ALL stats.
if (getStatsEntry())
getStatsEntry()->incActualRowsReturned();
workAtp_->setDiagsArea(NULL); // get rid of warnings.
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == matches_))
step_ = CLOSE_ORC_CURSOR;
else
step_ = GET_ORC_ROW;
break;
}
case CLOSE_ORC_CURSOR:
{
retcode = orci_->scanClose();
if (retcode < 0)
{
setupError(EXE_ERROR_FROM_LOB_INTERFACE, retcode, "ORC", "scanClose",
orci_->getErrorText(-retcode));
step_ = HANDLE_ERROR;
break;
}
currRangeNum_++;
if (currRangeNum_ < (beginRangeNum_ + numRanges_))
{
// move to the next file.
step_ = INIT_ORC_CURSOR;
break;
}
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
step_ = DONE;
}
break;
case DONE:
{
if (handleDone(rc))
return rc;
step_ = NOT_STARTED;
}
break;
default:
{
break;
}
} // switch
} // while
return WORK_OK;
}
ExOrcFastAggrTcb::ExOrcFastAggrTcb(
const ComTdbOrcFastAggr &orcAggrTdb,
ex_globals * glob ) :
ExOrcScanTcb(orcAggrTdb, glob),
step_(NOT_STARTED)
{
if (orcAggrTdb.outputRowLength_ > 0)
aggrRow_ = new(glob->getDefaultHeap()) char[orcAggrTdb.outputRowLength_];
}
ExOrcFastAggrTcb::~ExOrcFastAggrTcb()
{
}
ExWorkProcRetcode ExOrcFastAggrTcb::work()
{
Lng32 retcode = 0;
short rc = 0;
while (!qparent_.down->isEmpty())
{
ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
if (pentry_down->downState.request == ex_queue::GET_NOMORE)
step_ = DONE;
switch (step_)
{
case NOT_STARTED:
{
matches_ = 0;
hdfsStats_ = NULL;
if (getStatsEntry())
hdfsStats_ = getStatsEntry()->castToExHdfsScanStats();
ex_assert(hdfsStats_, "hdfs stats cannot be null");
orcAggrTdb().getHdfsFileInfoList()->position();
rowCount_ = 0;
step_ = ORC_AGGR_INIT;
}
break;
case ORC_AGGR_INIT:
{
if (orcAggrTdb().getHdfsFileInfoList()->atEnd())
{
step_ = ORC_AGGR_PROJECT;
break;
}
hdfo_ = (HdfsFileInfo*)orcAggrTdb().getHdfsFileInfoList()->getNext();
hdfsFileName_ = hdfo_->fileName();
step_ = ORC_AGGR_EVAL;
}
break;
case ORC_AGGR_EVAL:
{
Int64 currRowCount = 0;
retcode = orci_->getRowCount(hdfsFileName_, currRowCount);
if (retcode < 0)
{
setupError(EXE_ERROR_FROM_LOB_INTERFACE, retcode, "ORC", "getRowCount",
orci_->getErrorText(-retcode));
step_ = HANDLE_ERROR;
break;
}
rowCount_ += currRowCount;
step_ = ORC_AGGR_INIT;
}
break;
case ORC_AGGR_PROJECT:
{
ExpTupleDesc * projTuppTD =
orcAggrTdb().workCriDesc_->getTupleDescriptor
(orcAggrTdb().workAtpIndex_);
Attributes * attr = projTuppTD->getAttr(0);
if (! attr)
{
step_ = HANDLE_ERROR;
break;
}
if (attr->getNullFlag())
{
*(short*)&aggrRow_[attr->getNullIndOffset()] = 0;
}
str_cpy_all(&aggrRow_[attr->getOffset()], (char*)&rowCount_, sizeof(rowCount_));
step_ = ORC_AGGR_RETURN;
}
break;
case ORC_AGGR_RETURN:
{
if (qparent_.up->isFull())
return WORK_OK;
short rc = 0;
if (moveRowToUpQueue(aggrRow_, orcAggrTdb().outputRowLength_,
&rc, FALSE))
return rc;
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
if (handleError(rc))
return rc;
step_ = DONE;
}
break;
case DONE:
{
if (handleDone(rc))
return rc;
step_ = NOT_STARTED;
}
break;
} // switch
} // while
return WORK_OK;
}
| 1 | 11,085 | Usually, CQDs are not accessed directly in executor operators. The setting should instead be passed as a flag in the TDB: the query may be compiled in a different process, in which case the CQD setting won't be available in the executor layer. | apache-trafodion | cpp
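The reviewer's point above is architectural: a CQD is a compile-time setting, and the executor operator may run in a different process from the compiler, so the value has to travel with the generated plan fragment (the TDB) rather than being looked up at run time. The following is a minimal, language-agnostic sketch of that pattern, written in Go purely for illustration; none of these names (compilerSettings, scanTdb, codeGen, work) are Trafodion APIs, they are assumptions standing in for the compiler-side defaults table, the shipped TDB, code generation, and the executor's work method.

package main

import "fmt"

// compilerSettings stands in for the CQD/defaults table that is only
// available in the process that compiles the query.
type compilerSettings struct {
	useRangeDelimiter bool
}

// scanTdb stands in for the generated plan fragment (the TDB) shipped to the
// executor process; the relevant setting is captured as a flag at codegen time.
type scanTdb struct {
	flags uint32
}

const useRangeDelimiterFlag uint32 = 0x1

// codeGen runs in the compiler process and folds the setting into the TDB flags.
func codeGen(s compilerSettings) scanTdb {
	var tdb scanTdb
	if s.useRangeDelimiter {
		tdb.flags |= useRangeDelimiterFlag
	}
	return tdb
}

// work runs in the executor process; it consults only the flag carried by the
// TDB, never the compiler-side defaults table.
func work(tdb scanTdb) {
	if tdb.flags&useRangeDelimiterFlag != 0 {
		fmt.Println("scan: honoring range delimiter")
	} else {
		fmt.Println("scan: range delimiter disabled")
	}
}

func main() {
	tdb := codeGen(compilerSettings{useRangeDelimiter: true})
	work(tdb)
}

Captured this way, the executor never depends on compiler-process state, which is exactly the property the reviewer asks for.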
@@ -477,9 +477,9 @@ func ShardID(shardID int32) ZapTag {
return NewInt32("shard-id", shardID)
}
-// ShardItem returns tag for ShardItem
-func ShardItem(shardItem interface{}) ZapTag {
- return NewAnyTag("shard-item", shardItem)
+// ShardContext returns tag for shard.Context
+func ShardContext(shard interface{}) ZapTag {
+ return NewAnyTag("shard-context", shard)
}
// ShardTime returns tag for ShardTime | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package tag
import (
"fmt"
"time"
enumspb "go.temporal.io/api/enums/v1"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/common/primitives/timestamp"
)
// All logging tags are defined in this file.
// To help finding available tags, we recommend that all tags to be categorized and placed in the corresponding section.
// We currently have those categories:
// 0. Common tags that can't be categorized(or belong to more than one)
// 1. Workflow: these tags are information that are useful to our customer, like workflow-id/run-id/task-queue/...
// 2. System : these tags are internal information which usually cannot be understood by our customers,
// LoggingCallAtKey is reserved tag
const LoggingCallAtKey = "logging-call-at"
/////////////////// Common tags defined here ///////////////////
// Operation returns tag for Operation
func Operation(operation string) ZapTag {
return NewStringTag("operation", operation)
}
// Error returns tag for Error
func Error(err error) ZapTag {
return NewErrorTag(err)
}
// IsRetryable returns tag for IsRetryable
func IsRetryable(isRetryable bool) ZapTag {
return NewBoolTag("is-retryable", isRetryable)
}
// ClusterName returns tag for ClusterName
func ClusterName(clusterName string) ZapTag {
return NewStringTag("cluster-name", clusterName)
}
// Timestamp returns tag for Timestamp
func Timestamp(timestamp time.Time) ZapTag {
return NewTimeTag("timestamp", timestamp)
}
// Timestamp returns tag for Timestamp
func TimestampPtr(t *time.Time) ZapTag {
return NewTimeTag("timestamp", timestamp.TimeValue(t))
}
/////////////////// Workflow tags defined here: ( wf is short for workflow) ///////////////////
// WorkflowAction returns tag for WorkflowAction
func workflowAction(action string) ZapTag {
return NewStringTag("wf-action", action)
}
// WorkflowListFilterType returns tag for WorkflowListFilterType
func workflowListFilterType(listFilterType string) ZapTag {
return NewStringTag("wf-list-filter-type", listFilterType)
}
// general
// WorkflowTimeoutType returns tag for WorkflowTimeoutType
func WorkflowTimeoutType(timeoutType enumspb.TimeoutType) ZapTag {
return NewStringTag("wf-timeout-type", timeoutType.String())
}
// WorkflowPollContextTimeout returns tag for WorkflowPollContextTimeout
func WorkflowPollContextTimeout(pollContextTimeout time.Duration) ZapTag {
return NewDurationTag("wf-poll-context-timeout", pollContextTimeout)
}
// WorkflowHandlerName returns tag for WorkflowHandlerName
func WorkflowHandlerName(handlerName string) ZapTag {
return NewStringTag("wf-handler-name", handlerName)
}
// WorkflowID returns tag for WorkflowID
func WorkflowID(workflowID string) ZapTag {
return NewStringTag("wf-id", workflowID)
}
// WorkflowType returns tag for WorkflowType
func WorkflowType(wfType string) ZapTag {
return NewStringTag("wf-type", wfType)
}
// WorkflowState returns tag for WorkflowState
func WorkflowState(s enumsspb.WorkflowExecutionState) ZapTag {
return NewStringTag("wf-state", s.String())
}
// WorkflowRunID returns tag for WorkflowRunID
func WorkflowRunID(runID string) ZapTag {
return NewStringTag("wf-run-id", runID)
}
// WorkflowResetBaseRunID returns tag for WorkflowResetBaseRunID
func WorkflowResetBaseRunID(runID string) ZapTag {
return NewStringTag("wf-reset-base-run-id", runID)
}
// WorkflowResetNewRunID returns tag for WorkflowResetNewRunID
func WorkflowResetNewRunID(runID string) ZapTag {
return NewStringTag("wf-reset-new-run-id", runID)
}
// WorkflowBinaryChecksum returns tag for WorkflowBinaryChecksum
func WorkflowBinaryChecksum(cs string) ZapTag {
return NewStringTag("wf-binary-checksum", cs)
}
// WorkflowActivityID returns tag for WorkflowActivityID
func WorkflowActivityID(id string) ZapTag {
return NewStringTag("wf-activity-id", id)
}
// WorkflowTimerID returns tag for WorkflowTimerID
func WorkflowTimerID(id string) ZapTag {
return NewStringTag("wf-timer-id", id)
}
// WorkflowBeginningRunID returns tag for WorkflowBeginningRunID
func WorkflowBeginningRunID(beginningRunID string) ZapTag {
return NewStringTag("wf-beginning-run-id", beginningRunID)
}
// WorkflowEndingRunID returns tag for WorkflowEndingRunID
func WorkflowEndingRunID(endingRunID string) ZapTag {
return NewStringTag("wf-ending-run-id", endingRunID)
}
// WorkflowTaskTimeoutSeconds returns tag for WorkflowTaskTimeoutSeconds
func WorkflowTaskTimeoutSeconds(s int64) ZapTag {
return NewInt64("workflow-task-timeout", s)
}
// WorkflowTaskTimeoutSeconds returns tag for WorkflowTaskTimeoutSeconds
func WorkflowTaskTimeout(s *time.Duration) ZapTag {
return NewDurationPtrTag("workflow-task-timeout", s)
}
// QueryID returns tag for QueryID
func QueryID(queryID string) ZapTag {
return NewStringTag("query-id", queryID)
}
// BlobSizeViolationOperation returns tag for BlobSizeViolationOperation
func BlobSizeViolationOperation(operation string) ZapTag {
return NewStringTag("blob-size-violation-operation", operation)
}
// namespace related
// WorkflowNamespaceID returns tag for WorkflowNamespaceID
func WorkflowNamespaceID(namespaceID string) ZapTag {
return NewStringTag("wf-namespace-id", namespaceID)
}
// WorkflowNamespace returns tag for WorkflowNamespace
func WorkflowNamespace(namespace string) ZapTag {
return NewStringTag("wf-namespace", namespace)
}
// WorkflowNamespaceIDs returns tag for WorkflowNamespaceIDs
func WorkflowNamespaceIDs(namespaceIDs map[string]struct{}) ZapTag {
return NewAnyTag("wf-namespace-ids", namespaceIDs)
}
// history event ID related
// WorkflowEventID returns tag for WorkflowEventID
func WorkflowEventID(eventID int64) ZapTag {
return NewInt64("wf-history-event-id", eventID)
}
// WorkflowScheduleID returns tag for WorkflowScheduleID
func WorkflowScheduleID(scheduleID int64) ZapTag {
return NewInt64("wf-schedule-id", scheduleID)
}
// WorkflowStartedID returns tag for WorkflowStartedID
func WorkflowStartedID(id int64) ZapTag {
return NewInt64("wf-started-id", id)
}
// WorkflowStartedID returns tag for WorkflowStartedTimestamp
func WorkflowStartedTimestamp(t *time.Time) ZapTag {
return NewTimePtrTag("wf-started-timestamp", t)
}
// WorkflowInitiatedID returns tag for WorkflowInitiatedID
func WorkflowInitiatedID(id int64) ZapTag {
return NewInt64("wf-initiated-id", id)
}
// WorkflowFirstEventID returns tag for WorkflowFirstEventID
func WorkflowFirstEventID(firstEventID int64) ZapTag {
return NewInt64("wf-first-event-id", firstEventID)
}
// WorkflowNextEventID returns tag for WorkflowNextEventID
func WorkflowNextEventID(nextEventID int64) ZapTag {
return NewInt64("wf-next-event-id", nextEventID)
}
// WorkflowBeginningFirstEventID returns tag for WorkflowBeginningFirstEventID
func WorkflowBeginningFirstEventID(beginningFirstEventID int64) ZapTag {
return NewInt64("wf-begining-first-event-id", beginningFirstEventID)
}
// WorkflowEndingNextEventID returns tag for WorkflowEndingNextEventID
func WorkflowEndingNextEventID(endingNextEventID int64) ZapTag {
return NewInt64("wf-ending-next-event-id", endingNextEventID)
}
// WorkflowResetNextEventID returns tag for WorkflowResetNextEventID
func WorkflowResetNextEventID(resetNextEventID int64) ZapTag {
return NewInt64("wf-reset-next-event-id", resetNextEventID)
}
// history tree
// WorkflowTreeID returns tag for WorkflowTreeID
func WorkflowTreeID(treeID string) ZapTag {
return NewStringTag("wf-tree-id", treeID)
}
// WorkflowBranchID returns tag for WorkflowBranchID
func WorkflowBranchID(branchID string) ZapTag {
return NewStringTag("wf-branch-id", branchID)
}
// workflow task
// WorkflowCommandType returns tag for WorkflowCommandType
func WorkflowCommandType(commandType enumspb.CommandType) ZapTag {
return NewStringTag("command-type", commandType.String())
}
// WorkflowQueryType returns tag for WorkflowQueryType
func WorkflowQueryType(qt string) ZapTag {
return NewStringTag("wf-query-type", qt)
}
// WorkflowTaskFailedCause returns tag for WorkflowTaskFailedCause
func WorkflowTaskFailedCause(workflowTaskFailCause enumspb.WorkflowTaskFailedCause) ZapTag {
return NewStringTag("workflow-task-fail-cause", workflowTaskFailCause.String())
}
// WorkflowTaskQueueType returns tag for WorkflowTaskQueueType
func WorkflowTaskQueueType(taskQueueType enumspb.TaskQueueType) ZapTag {
return NewStringTag("wf-task-queue-type", taskQueueType.String())
}
// WorkflowTaskQueueName returns tag for WorkflowTaskQueueName
func WorkflowTaskQueueName(taskQueueName string) ZapTag {
return NewStringTag("wf-task-queue-name", taskQueueName)
}
// size limit
// WorkflowSize returns tag for WorkflowSize
func WorkflowSize(workflowSize int64) ZapTag {
return NewInt64("wf-size", workflowSize)
}
// WorkflowSignalCount returns tag for SignalCount
func WorkflowSignalCount(signalCount int64) ZapTag {
return NewInt64("wf-signal-count", signalCount)
}
// WorkflowHistorySize returns tag for HistorySize
func WorkflowHistorySize(historySize int) ZapTag {
return NewInt("wf-history-size", historySize)
}
// WorkflowHistorySizeBytes returns tag for HistorySizeBytes
func WorkflowHistorySizeBytes(historySizeBytes int) ZapTag {
return NewInt("wf-history-size-bytes", historySizeBytes)
}
// WorkflowEventCount returns tag for EventCount
func WorkflowEventCount(eventCount int) ZapTag {
return NewInt("wf-event-count", eventCount)
}
/////////////////// System tags defined here: ///////////////////
// Tags with pre-define values
// Component returns tag for Component
func component(component string) ZapTag {
return NewStringTag("component", component)
}
// Lifecycle returns tag for Lifecycle
func lifecycle(lifecycle string) ZapTag {
return NewStringTag("lifecycle", lifecycle)
}
// StoreOperation returns tag for StoreOperation
func storeOperation(storeOperation string) ZapTag {
return NewStringTag("store-operation", storeOperation)
}
// OperationResult returns tag for OperationResult
func operationResult(operationResult string) ZapTag {
return NewStringTag("operation-result", operationResult)
}
// ErrorType returns tag for ErrorType
func errorType(errorType string) ZapTag {
return NewStringTag("error-type", errorType)
}
// Shardupdate returns tag for Shardupdate
func shardupdate(shardupdate string) ZapTag {
return NewStringTag("shard-update", shardupdate)
}
// general
// Service returns tag for Service
func Service(sv string) ZapTag {
return NewStringTag("service", sv)
}
// Addresses returns tag for Addresses
func Addresses(ads []string) ZapTag {
return NewStringsTag("addresses", ads)
}
// ListenerName returns tag for ListenerName
func ListenerName(name string) ZapTag {
return NewStringTag("listener-name", name)
}
// Address return tag for Address
func Address(ad string) ZapTag {
return NewStringTag("address", ad)
}
// HostID return tag for HostID
func HostID(hid string) ZapTag {
return NewStringTag("hostId", hid)
}
// Env return tag for runtime environment
func Env(env string) ZapTag {
return NewStringTag("env", env)
}
// Key returns tag for Key
func Key(k string) ZapTag {
return NewStringTag("key", k)
}
// Name returns tag for Name
func Name(k string) ZapTag {
return NewStringTag("name", k)
}
// Value returns tag for Value
func Value(v interface{}) ZapTag {
return NewAnyTag("value", v)
}
// ValueType returns tag for ValueType
func ValueType(v interface{}) ZapTag {
return NewStringTag("value-type", fmt.Sprintf("%T", v))
}
// DefaultValue returns tag for DefaultValue
func DefaultValue(v interface{}) ZapTag {
return NewAnyTag("default-value", v)
}
// IgnoredValue returns tag for IgnoredValue
func IgnoredValue(v interface{}) ZapTag {
return NewAnyTag("ignored-value", v)
}
// Port returns tag for Port
func Port(p int) ZapTag {
return NewInt("port", p)
}
// CursorTimestamp returns tag for CursorTimestamp
func CursorTimestamp(timestamp time.Time) ZapTag {
return NewTimeTag("cursor-timestamp", timestamp)
}
// MetricScope returns tag for MetricScope
func MetricScope(metricScope int) ZapTag {
return NewInt("metric-scope", metricScope)
}
// StoreType returns tag for StoreType
func StoreType(storeType string) ZapTag {
return NewStringTag("store-type", storeType)
}
// DetailInfo returns tag for DetailInfo
func DetailInfo(i string) ZapTag {
return NewStringTag("detail-info", i)
}
// Counter returns tag for Counter
func Counter(c int) ZapTag {
return NewInt("counter", c)
}
// RequestCount returns tag for RequestCount
func RequestCount(c int) ZapTag {
return NewInt("request-count", c)
}
// Number returns tag for Number
func Number(n int64) ZapTag {
return NewInt64("number", n)
}
// NextNumber returns tag for NextNumber
func NextNumber(n int64) ZapTag {
return NewInt64("next-number", n)
}
// Bool returns tag for Bool
func Bool(b bool) ZapTag {
return NewBoolTag("bool", b)
}
// ServerName returns tag for ServerName
func ServerName(serverName string) ZapTag {
return NewStringTag("server-name", serverName)
}
// CertThumbprint returns tag for CertThumbprint
func CertThumbprint(thumbprint string) ZapTag {
return NewStringTag("cert-thumbprint", thumbprint)
}
// history engine shard
// ShardID returns tag for ShardID
func ShardID(shardID int32) ZapTag {
return NewInt32("shard-id", shardID)
}
// ShardItem returns tag for ShardItem
func ShardItem(shardItem interface{}) ZapTag {
return NewAnyTag("shard-item", shardItem)
}
// ShardTime returns tag for ShardTime
func ShardTime(shardTime interface{}) ZapTag {
return NewAnyTag("shard-time", shardTime)
}
// ShardReplicationAck returns tag for ShardReplicationAck
func ShardReplicationAck(shardReplicationAck int64) ZapTag {
return NewInt64("shard-replication-ack", shardReplicationAck)
}
// PreviousShardRangeID returns tag for PreviousShardRangeID
func PreviousShardRangeID(id int64) ZapTag {
return NewInt64("previous-shard-range-id", id)
}
// ShardRangeID returns tag for ShardRangeID
func ShardRangeID(id int64) ZapTag {
return NewInt64("shard-range-id", id)
}
// ReadLevel returns tag for ReadLevel
func ReadLevel(lv int64) ZapTag {
return NewInt64("read-level", lv)
}
// MinLevel returns tag for MinLevel
func MinLevel(lv int64) ZapTag {
return NewInt64("min-level", lv)
}
// MaxLevel returns tag for MaxLevel
func MaxLevel(lv int64) ZapTag {
return NewInt64("max-level", lv)
}
// ShardTransferAcks returns tag for ShardTransferAcks
func ShardTransferAcks(shardTransferAcks interface{}) ZapTag {
return NewAnyTag("shard-transfer-acks", shardTransferAcks)
}
// ShardTimerAcks returns tag for ShardTimerAcks
func ShardTimerAcks(shardTimerAcks interface{}) ZapTag {
return NewAnyTag("shard-timer-acks", shardTimerAcks)
}
// task queue processor
// Task returns tag for Task
func Task(task interface{}) ZapTag {
return NewAnyTag("queue-task", task)
}
// TaskID returns tag for TaskID
func TaskID(taskID int64) ZapTag {
return NewInt64("queue-task-id", taskID)
}
// TaskVersion returns tag for TaskVersion
func TaskVersion(taskVersion int64) ZapTag {
return NewInt64("queue-task-version", taskVersion)
}
// TaskVisibilityTimestamp returns tag for task visibilityTimestamp
func TaskVisibilityTimestamp(timestamp time.Time) ZapTag {
return NewTimeTag("queue-task-visibility-timestamp", timestamp)
}
// NumberProcessed returns tag for NumberProcessed
func NumberProcessed(n int) ZapTag {
return NewInt("number-processed", n)
}
// NumberDeleted returns tag for NumberDeleted
func NumberDeleted(n int) ZapTag {
return NewInt("number-deleted", n)
}
// TimerTaskStatus returns tag for TimerTaskStatus
func TimerTaskStatus(timerTaskStatus int32) ZapTag {
return NewInt32("timer-task-status", timerTaskStatus)
}
// retry
// Attempt returns tag for Attempt
func Attempt(attempt int32) ZapTag {
return NewInt32("attempt", attempt)
}
// AttemptCount returns tag for AttemptCount
func AttemptCount(attemptCount int64) ZapTag {
return NewInt64("attempt-count", attemptCount)
}
// AttemptStart returns tag for AttemptStart
func AttemptStart(attemptStart time.Time) ZapTag {
return NewTimeTag("attempt-start", attemptStart)
}
// AttemptEnd returns tag for AttemptEnd
func AttemptEnd(attemptEnd time.Time) ZapTag {
return NewTimeTag("attempt-end", attemptEnd)
}
// ScheduleAttempt returns tag for ScheduleAttempt
func ScheduleAttempt(scheduleAttempt int32) ZapTag {
return NewInt32("schedule-attempt", scheduleAttempt)
}
// ElasticSearch
// ESRequest returns tag for ESRequest
func ESRequest(ESRequest string) ZapTag {
return NewStringTag("es-request", ESRequest)
}
// ESResponseStatus returns tag for ESResponse status
func ESResponseStatus(status int) ZapTag {
return NewInt("es-response-status", status)
}
// ESResponseError returns tag for ESResponse error
func ESResponseError(msg string) ZapTag {
return NewStringTag("es-response-error", msg)
}
// ESKey returns tag for ESKey
func ESKey(ESKey string) ZapTag {
return NewStringTag("es-mapping-key", ESKey)
}
// ESValue returns tag for ESValue
func ESValue(ESValue []byte) ZapTag {
// convert value to string type so that the value logged is human readable
return NewStringTag("es-mapping-value", string(ESValue))
}
// ESConfig returns tag for ESConfig
func ESConfig(c interface{}) ZapTag {
return NewAnyTag("es-config", c)
}
func ESIndex(index string) ZapTag {
return NewStringTag("es-index", index)
}
func ESMapping(mapping map[string]enumspb.IndexedValueType) ZapTag {
return NewAnyTag("es-mapping", mapping)
}
func ESClusterStatus(status string) ZapTag {
return NewStringTag("es-cluster-status", status)
}
// ESField returns tag for ESField
func ESField(ESField string) ZapTag {
return NewStringTag("es-Field", ESField)
}
// ESDocID returns tag for ESDocID
func ESDocID(id string) ZapTag {
return NewStringTag("es-doc-id", id)
}
// SysStackTrace returns tag for SysStackTrace
func SysStackTrace(stackTrace string) ZapTag {
return NewStringTag("sys-stack-trace", stackTrace)
}
// TokenLastEventID returns tag for TokenLastEventID
func TokenLastEventID(id int64) ZapTag {
return NewInt64("token-last-event-id", id)
}
/////////////////// XDC tags defined here: xdc- ///////////////////
// SourceCluster returns tag for SourceCluster
func SourceCluster(sourceCluster string) ZapTag {
return NewStringTag("xdc-source-cluster", sourceCluster)
}
// PrevActiveCluster returns tag for PrevActiveCluster
func PrevActiveCluster(prevActiveCluster string) ZapTag {
return NewStringTag("xdc-prev-active-cluster", prevActiveCluster)
}
// FailoverMsg returns tag for FailoverMsg
func FailoverMsg(failoverMsg string) ZapTag {
return NewStringTag("xdc-failover-msg", failoverMsg)
}
// FailoverVersion returns tag for Version
func FailoverVersion(version int64) ZapTag {
return NewInt64("xdc-failover-version", version)
}
// CurrentVersion returns tag for CurrentVersion
func CurrentVersion(currentVersion int64) ZapTag {
return NewInt64("xdc-current-version", currentVersion)
}
// IncomingVersion returns tag for IncomingVersion
func IncomingVersion(incomingVersion int64) ZapTag {
return NewInt64("xdc-incoming-version", incomingVersion)
}
// FirstEventVersion returns tag for FirstEventVersion
func FirstEventVersion(version int64) ZapTag {
return NewInt64("xdc-first-event-version", version)
}
// LastEventVersion returns tag for LastEventVersion
func LastEventVersion(version int64) ZapTag {
return NewInt64("xdc-last-event-version", version)
}
// TokenLastEventVersion returns tag for TokenLastEventVersion
func TokenLastEventVersion(version int64) ZapTag {
return NewInt64("xdc-token-last-event-version", version)
}
/////////////////// Archival tags defined here: archival- ///////////////////
// archival request tags
// ArchivalCallerServiceName returns tag for the service name calling archival client
func ArchivalCallerServiceName(callerServiceName string) ZapTag {
return NewStringTag("archival-caller-service-name", callerServiceName)
}
// ArchivalArchiveAttemptedInline returns tag for whether archival is attempted inline before signal is sent.
func ArchivalArchiveAttemptedInline(archiveInline bool) ZapTag {
return NewBoolTag("archival-archive-attempted-inline", archiveInline)
}
// ArchivalRequestNamespaceID returns tag for RequestNamespaceID
func ArchivalRequestNamespaceID(requestNamespaceID string) ZapTag {
return NewStringTag("archival-request-namespace-id", requestNamespaceID)
}
// ArchivalRequestNamespace returns tag for RequestNamespace
func ArchivalRequestNamespace(requestNamespace string) ZapTag {
return NewStringTag("archival-request-namespace", requestNamespace)
}
// ArchivalRequestWorkflowID returns tag for RequestWorkflowID
func ArchivalRequestWorkflowID(requestWorkflowID string) ZapTag {
return NewStringTag("archival-request-workflow-id", requestWorkflowID)
}
// ArchvialRequestWorkflowType returns tag for RequestWorkflowType
func ArchvialRequestWorkflowType(requestWorkflowType string) ZapTag {
return NewStringTag("archival-request-workflow-type", requestWorkflowType)
}
// ArchivalRequestRunID returns tag for RequestRunID
func ArchivalRequestRunID(requestRunID string) ZapTag {
return NewStringTag("archival-request-run-id", requestRunID)
}
// ArchivalRequestBranchToken returns tag for RequestBranchToken
func ArchivalRequestBranchToken(requestBranchToken []byte) ZapTag {
return NewBinaryTag("archival-request-branch-token", requestBranchToken)
}
// ArchivalRequestNextEventID returns tag for RequestNextEventID
func ArchivalRequestNextEventID(requestNextEventID int64) ZapTag {
return NewInt64("archival-request-next-event-id", requestNextEventID)
}
// ArchivalRequestCloseFailoverVersion returns tag for RequestCloseFailoverVersion
func ArchivalRequestCloseFailoverVersion(requestCloseFailoverVersion int64) ZapTag {
return NewInt64("archival-request-close-failover-version", requestCloseFailoverVersion)
}
// ArchivalRequestCloseTimestamp returns tag for RequestCloseTimestamp
func ArchivalRequestCloseTimestamp(requestCloseTimeStamp *time.Time) ZapTag {
return NewTimeTag("archival-request-close-timestamp", timestamp.TimeValue(requestCloseTimeStamp))
}
// ArchivalRequestStatus returns tag for RequestStatus
func ArchivalRequestStatus(requestStatus string) ZapTag {
return NewStringTag("archival-request-status", requestStatus)
}
// ArchivalURI returns tag for Archival URI
func ArchivalURI(URI string) ZapTag {
return NewStringTag("archival-URI", URI)
}
// ArchivalArchiveFailReason returns tag for ArchivalArchiveFailReason
func ArchivalArchiveFailReason(archiveFailReason string) ZapTag {
return NewStringTag("archival-archive-fail-reason", archiveFailReason)
}
// ArchivalDeleteHistoryFailReason returns tag for ArchivalDeleteHistoryFailReason
func ArchivalDeleteHistoryFailReason(deleteHistoryFailReason string) ZapTag {
return NewStringTag("archival-delete-history-fail-reason", deleteHistoryFailReason)
}
// ArchivalVisibilityQuery returns tag for the query for getting archived visibility record
func ArchivalVisibilityQuery(query string) ZapTag {
return NewStringTag("archival-visibility-query", query)
}
// The following logger tags are only used by internal archiver implemention.
// TODO: move them to internal repo once temporal plugin model is in place.
// ArchivalBlobKey returns tag for BlobKey
func ArchivalBlobKey(blobKey string) ZapTag {
return NewStringTag("archival-blob-key", blobKey)
}
// ArchivalDeterministicConstructionCheckFailReason returns tag for ArchivalDeterministicConstructionCheckFailReason
func ArchivalDeterministicConstructionCheckFailReason(deterministicConstructionCheckFailReason string) ZapTag {
return NewStringTag("archival-deterministic-construction-check-fail-reason", deterministicConstructionCheckFailReason)
}
// ArchivalNonDeterministicBlobKey returns tag for randomly generated NonDeterministicBlobKey
func ArchivalNonDeterministicBlobKey(nondeterministicBlobKey string) ZapTag {
return NewStringTag("archival-non-deterministic-blob-key", nondeterministicBlobKey)
}
// ArchivalBlobIntegrityCheckFailReason returns tag for ArchivalBlobIntegrityCheckFailReason
func ArchivalBlobIntegrityCheckFailReason(blobIntegrityCheckFailReason string) ZapTag {
return NewStringTag("archival-blob-integrity-check-fail-reason", blobIntegrityCheckFailReason)
}
// ArchivalBlobstoreContextTimeout returns tag for ArchivalBlobstoreContextTimeout
func ArchivalBlobstoreContextTimeout(blobstoreContextTimeout time.Duration) ZapTag {
return NewDurationTag("archival-blobstore-context-timeout", blobstoreContextTimeout)
}
// TransportType returns tag for transportType
func TransportType(transportType string) ZapTag {
return NewStringTag("transport-type", transportType)
}
// ActivityInfo returns tag for activity info
func ActivityInfo(activityInfo interface{}) ZapTag {
return NewAnyTag("activity-info", activityInfo)
}
// WorkflowTaskRequestId returns tag for workflow task RequestId
func WorkflowTaskRequestId(s string) ZapTag {
return NewStringTag("workflow-task-request-id", s)
}
// AckLevel returns tag for ack level
func AckLevel(s interface{}) ZapTag {
return NewAnyTag("ack-level", s)
}
// MinQueryLevel returns tag for query level
func MinQueryLevel(s time.Time) ZapTag {
return NewTimeTag("min-query-level", s)
}
// MaxQueryLevel returns tag for query level
func MaxQueryLevel(s time.Time) ZapTag {
return NewTimeTag("max-query-level", s)
}
// BootstrapHostPorts returns tag for bootstrap host ports
func BootstrapHostPorts(s string) ZapTag {
return NewStringTag("bootstrap-hostports", s)
}
// TLSCertFile returns tag for TLS cert file name
func TLSCertFile(filePath string) ZapTag {
return NewStringTag("tls-cert-file", filePath)
}
// TLSKeyFile returns tag for TLS key file
func TLSKeyFile(filePath string) ZapTag {
return NewStringTag("tls-key-file", filePath)
}
// TLSCertFiles returns tag for TLS cert file names
func TLSCertFiles(filePaths []string) ZapTag {
return NewStringsTag("tls-cert-files", filePaths)
}
// Timeout returns tag for timeout
func Timeout(timeoutValue string) ZapTag {
return NewStringTag("timeout", timeoutValue)
}
| 1 | 13,015 | This tag was broken: the value written to the log was the memory address. Please verify that this change actually writes meaningful content to the log. | temporalio-temporal | go
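The review comment above concerns how an opaque shard handle gets rendered when it is logged through a generic, reflection-based tag such as the renamed ShardContext helper in the patch. The sketch below is a hypothetical Go illustration, not Temporal code: shardContext, its fields, and the field names are assumptions. It shows why handing an opaque pointer to an any-style zap field produces nothing useful (the reviewer saw a raw memory address; with a JSON-reflecting encoder it may instead come out as an empty object, since the fields are unexported), and how a fmt.Stringer or explicit typed fields carry the identifying data instead.

package main

import (
	"fmt"

	"go.uber.org/zap"
)

// shardContext is a hypothetical stand-in for an opaque shard handle; the
// fields exist only to illustrate the logging behaviour.
type shardContext struct {
	shardID int32
	rangeID int64
}

// String gives the handle an explicit, human-readable rendering.
func (s *shardContext) String() string {
	return fmt.Sprintf("shardID=%d rangeID=%d", s.shardID, s.rangeID)
}

func main() {
	logger, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	s := &shardContext{shardID: 7, rangeID: 42}

	// Reflection-based field: for an unknown pointer type with unexported
	// fields there is nothing meaningful for the encoder to emit.
	logger.Info("acquired shard", zap.Any("shard-context", s))

	// Logging through the Stringer, or through explicit typed fields,
	// carries the shard's identifying data into the log line.
	logger.Info("acquired shard", zap.Stringer("shard-context", s))
	logger.Info("acquired shard",
		zap.Int32("shard-id", s.shardID),
		zap.Int64("shard-range-id", s.rangeID))
}

In practice, logging explicit identifiers (as the ShardID and ShardRangeID helpers in the file above already do) is usually preferable to dumping the handle itself, whatever the tag is named.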
@@ -0,0 +1,9 @@
+"""Checks import position rule with pep-0008"""
+# pylint: disable=unused-import,relative-import,ungrouped-imports,import-error,no-name-in-module,relative-beyond-top-level,undefined-variable
+
+__author__ = 'some author'
+__email__ = 'some.author@some_email'
+__copyright__ = 'Some copyright'
+
+
+import sys | 1 | 1 | 8,522 | Why do you have to disable all of these checks? | PyCQA-pylint | py |
|
@@ -1639,6 +1639,7 @@ class CommandDispatcher:
"""
try:
elem.set_value(text)
+ mainwindow.raise_window(objreg.last_focused_window())
except webelem.OrphanedError as e:
message.error('Edited element vanished')
except webelem.Error as e: | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Command dispatcher for TabbedBrowser."""
import os
import os.path
import shlex
import functools
import typing
from PyQt5.QtWidgets import QApplication, QTabBar, QDialog
from PyQt5.QtCore import Qt, QUrl, QEvent, QUrlQuery
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.config import config, configdata
from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate,
webelem, downloads)
from qutebrowser.keyinput import modeman
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils, debug, standarddir)
from qutebrowser.utils.usertypes import KeyMode
from qutebrowser.misc import editor, guiprocess
from qutebrowser.completion.models import urlmodel, miscmodels
from qutebrowser.mainwindow import mainwindow
class CommandDispatcher:
"""Command dispatcher for TabbedBrowser.
Contains all commands which are related to the current tab.
We can't simply add these commands to BrowserTab directly and use
currentWidget() for TabbedBrowser.cmd because at the time
cmdutils.register() decorators are run, currentWidget() will return None.
Attributes:
_editor: The ExternalEditor object.
_win_id: The window ID the CommandDispatcher is associated with.
_tabbed_browser: The TabbedBrowser used.
"""
def __init__(self, win_id, tabbed_browser):
self._win_id = win_id
self._tabbed_browser = tabbed_browser
def __repr__(self):
return utils.get_repr(self)
def _new_tabbed_browser(self, private):
"""Get a tabbed-browser from a new window."""
new_window = mainwindow.MainWindow(private=private)
new_window.show()
return new_window.tabbed_browser
def _count(self):
"""Convenience method to get the widget count."""
return self._tabbed_browser.count()
def _set_current_index(self, idx):
"""Convenience method to set the current widget index."""
cmdutils.check_overflow(idx, 'int')
self._tabbed_browser.setCurrentIndex(idx)
def _current_index(self):
"""Convenience method to get the current widget index."""
return self._tabbed_browser.currentIndex()
def _current_url(self):
"""Convenience method to get the current url."""
try:
return self._tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdexc.CommandError(msg)
def _current_title(self):
"""Convenience method to get the current title."""
return self._current_widget().title()
def _current_widget(self):
"""Get the currently active widget from a command."""
widget = self._tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
return widget
def _open(self, url, tab=False, background=False, window=False,
related=False, private=None):
"""Helper function to open a page.
Args:
url: The URL to open as QUrl.
tab: Whether to open in a new tab.
background: Whether to open in the background.
window: Whether to open in a new window
private: If opening a new window, open it in private browsing mode.
If not given, inherit the current window's mode.
"""
urlutils.raise_cmdexc_if_invalid(url)
tabbed_browser = self._tabbed_browser
cmdutils.check_exclusive((tab, background, window, private), 'tbwp')
if window and private is None:
private = self._tabbed_browser.private
if window or private:
tabbed_browser = self._new_tabbed_browser(private)
tabbed_browser.tabopen(url)
elif tab:
tabbed_browser.tabopen(url, background=False, related=related)
elif background:
tabbed_browser.tabopen(url, background=True, related=related)
else:
widget = self._current_widget()
widget.openurl(url)
def _cntwidget(self, count=None):
"""Return a widget based on a count/idx.
Args:
count: The tab index, or None.
Return:
The current widget if count is None.
The widget with the given tab ID if count is given.
None if no widget was found.
"""
if count is None:
return self._tabbed_browser.currentWidget()
elif 1 <= count <= self._count():
cmdutils.check_overflow(count + 1, 'int')
return self._tabbed_browser.widget(count - 1)
else:
return None
def _tab_focus_last(self, *, show_error=True):
"""Select the tab which was last focused."""
try:
tab = objreg.get('last-focused-tab', scope='window',
window=self._win_id)
except KeyError:
if not show_error:
return
raise cmdexc.CommandError("No last focused tab!")
idx = self._tabbed_browser.indexOf(tab)
if idx == -1:
raise cmdexc.CommandError("Last focused tab vanished!")
self._set_current_index(idx)
def _get_selection_override(self, prev, next_, opposite):
"""Helper function for tab_close to get the tab to select.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
Return:
QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change
should be made.
"""
cmdutils.check_exclusive((prev, next_, opposite), 'pno')
if prev:
return QTabBar.SelectLeftTab
elif next_:
return QTabBar.SelectRightTab
elif opposite:
conf_selection = config.val.tabs.select_on_remove
if conf_selection == QTabBar.SelectLeftTab:
return QTabBar.SelectRightTab
elif conf_selection == QTabBar.SelectRightTab:
return QTabBar.SelectLeftTab
elif conf_selection == QTabBar.SelectPreviousTab:
raise cmdexc.CommandError(
"-o is not supported with 'tabs.select_on_remove' set to "
"'last-used'!")
else: # pragma: no cover
raise ValueError("Invalid select_on_remove value "
"{!r}!".format(conf_selection))
return None
def _tab_close(self, tab, prev=False, next_=False, opposite=False):
"""Helper function for tab_close be able to handle message.async.
Args:
tab: Tab object to select be closed.
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
count: The tab index to close, or None
"""
tabbar = self._tabbed_browser.tabBar()
selection_override = self._get_selection_override(prev, next_,
opposite)
if selection_override is None:
self._tabbed_browser.close_tab(tab)
else:
old_selection_behavior = tabbar.selectionBehaviorOnRemove()
tabbar.setSelectionBehaviorOnRemove(selection_override)
self._tabbed_browser.close_tab(tab)
tabbar.setSelectionBehaviorOnRemove(old_selection_behavior)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_close(self, prev=False, next_=False, opposite=False,
force=False, count=None):
"""Close the current/[count]th tab.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
force: Avoid confirmation for pinned tabs.
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
close = functools.partial(self._tab_close, tab, prev,
next_, opposite)
self._tabbed_browser.tab_close_prompt_if_pinned(tab, force, close)
@cmdutils.register(instance='command-dispatcher', scope='window',
name='tab-pin')
@cmdutils.argument('count', count=True)
def tab_pin(self, count=None):
"""Pin/Unpin the current/[count]th tab.
Pinning a tab shrinks it to the size of its title text.
Attempting to close a pinned tab will cause a confirmation,
unless --force is passed.
Args:
count: The tab index to pin or unpin, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
to_pin = not tab.data.pinned
self._tabbed_browser.set_tab_pinned(tab, to_pin)
@cmdutils.register(instance='command-dispatcher', name='open',
maxsplit=0, scope='window')
@cmdutils.argument('url', completion=urlmodel.url)
@cmdutils.argument('count', count=True)
def openurl(self, url=None, related=False,
bg=False, tab=False, window=False, count=None, secure=False,
private=False):
"""Open a URL in the current/[count]th tab.
If the URL contains newlines, each line gets opened in its own tab.
Args:
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
count: The tab index to open the URL in, or None.
secure: Force HTTPS.
private: Open a new window in private browsing mode.
"""
if url is None:
urls = [config.val.url.default_page]
else:
urls = self._parse_url_input(url)
for i, cur_url in enumerate(urls):
if secure:
cur_url.setScheme('https')
if not window and i > 0:
tab = False
bg = True
if tab or bg or window or private:
self._open(cur_url, tab, bg, window, related=related,
private=private)
else:
curtab = self._cntwidget(count)
if curtab is None:
if count is None:
# We want to open a URL in the current tab, but none
# exists yet.
self._tabbed_browser.tabopen(cur_url)
else:
# Explicit count with a tab that doesn't exist.
return
elif curtab.data.pinned:
message.info("Tab is pinned!")
else:
curtab.openurl(cur_url)
def _parse_url(self, url, *, force_search=False):
"""Parse a URL or quickmark or search query.
Args:
url: The URL to parse.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A URL that can be opened.
"""
try:
return objreg.get('quickmark-manager').get(url)
except urlmarks.Error:
try:
return urlutils.fuzzy_url(url, force_search=force_search)
except urlutils.InvalidUrlError as e:
# We don't use cmdexc.CommandError here as this can be
# called async from edit_url
message.error(str(e))
return None
def _parse_url_input(self, url):
"""Parse a URL or newline-separated list of URLs.
Args:
url: The URL or list to parse.
Return:
A list of URLs that can be opened.
"""
if isinstance(url, QUrl):
yield url
return
force_search = False
urllist = [u for u in url.split('\n') if u.strip()]
if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and
urlutils.get_path_if_valid(urllist[0], check_exists=True)
is None):
urllist = [url]
force_search = True
for cur_url in urllist:
parsed = self._parse_url(cur_url, force_search=force_search)
if parsed is not None:
yield parsed
@cmdutils.register(instance='command-dispatcher', name='reload',
scope='window')
@cmdutils.argument('count', count=True)
def reloadpage(self, force=False, count=None):
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.reload(force=force)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def stop(self, count=None):
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.stop()
def _print_preview(self, tab):
"""Show a print preview."""
def print_callback(ok):
if not ok:
message.error("Printing failed!")
tab.printing.check_preview_support()
diag = QPrintPreviewDialog(tab)
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(functools.partial(
tab.printing.to_printer, callback=print_callback))
diag.exec_()
def _print_pdf(self, tab, filename):
"""Print to the given PDF file."""
tab.printing.check_pdf_support()
filename = os.path.expanduser(filename)
directory = os.path.dirname(filename)
if directory and not os.path.exists(directory):
os.mkdir(directory)
tab.printing.to_pdf(filename)
log.misc.debug("Print to file: {}".format(filename))
def _print(self, tab):
"""Print with a QPrintDialog."""
def print_callback(ok):
"""Called when printing finished."""
if not ok:
message.error("Printing failed!")
diag.deleteLater()
def do_print():
"""Called when the dialog was closed."""
tab.printing.to_printer(diag.printer(), print_callback)
diag = QPrintDialog(tab)
if utils.is_mac:
# For some reason we get a segfault when using open() on macOS
ret = diag.exec_()
if ret == QDialog.Accepted:
do_print()
else:
diag.open(do_print)
@cmdutils.register(instance='command-dispatcher', name='print',
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(self, preview=False, count=None, *, pdf=None):
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
pdf: The file path to write the PDF to.
"""
tab = self._cntwidget(count)
if tab is None:
return
try:
if pdf:
tab.printing.check_pdf_support()
else:
tab.printing.check_printer_support()
if preview:
tab.printing.check_preview_support()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
if preview:
self._print_preview(tab)
elif pdf:
self._print_pdf(tab, pdf)
else:
self._print(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_clone(self, bg=False, window=False):
"""Duplicate the current tab.
Args:
bg: Open in a background tab.
window: Open in a new window.
Return:
The new QWebView.
"""
cmdutils.check_exclusive((bg, window), 'bw')
curtab = self._current_widget()
cur_title = self._tabbed_browser.page_title(self._current_index())
try:
history = curtab.history.serialize()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
# The new tab could be in a new tabbed_browser (e.g. because of
# tabs.tabs_are_windows being set)
if window:
new_tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
new_tabbed_browser = self._tabbed_browser
newtab = new_tabbed_browser.tabopen(background=bg)
new_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=newtab.win_id)
idx = new_tabbed_browser.indexOf(newtab)
new_tabbed_browser.set_page_title(idx, cur_title)
if config.val.tabs.favicons.show:
new_tabbed_browser.setTabIcon(idx, curtab.icon())
if config.val.tabs.tabs_are_windows:
new_tabbed_browser.window().setWindowIcon(curtab.icon())
newtab.data.keep_icon = True
newtab.history.deserialize(history)
newtab.zoom.set_factor(curtab.zoom.factor())
new_tabbed_browser.set_tab_pinned(newtab, curtab.data.pinned)
return newtab
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', completion=miscmodels.other_buffer)
def tab_take(self, index):
"""Take a tab from another window.
Args:
index: The [win_id/]index of the tab to take. Or a substring
in which case the closest match will be taken.
"""
tabbed_browser, tab = self._resolve_buffer_index(index)
if tabbed_browser is self._tabbed_browser:
raise cmdexc.CommandError("Can't take a tab from the same window")
self._open(tab.url(), tab=True)
tabbed_browser.close_tab(tab, add_undo=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('win_id', completion=miscmodels.window)
def tab_give(self, win_id: int = None):
"""Give the current tab to a new or existing window if win_id given.
If no win_id is given, the tab will get detached into a new window.
Args:
win_id: The window ID of the window to give the current tab to.
"""
if win_id == self._win_id:
raise cmdexc.CommandError("Can't give a tab to the same window")
if win_id is None:
if self._count() < 2:
raise cmdexc.CommandError("Cannot detach from a window with "
"only one tab")
tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.tabopen(self._current_url())
self._tabbed_browser.close_tab(self._current_widget(), add_undo=False)
@cmdutils.register(instance='command-dispatcher', scope='window',
deprecated='Use :tab-give instead!')
def tab_detach(self):
"""Deprecated way to detach a tab."""
self.tab_give()
def _back_forward(self, tab, bg, window, count, forward):
"""Helper function for :back/:forward."""
history = self._current_widget().history
# Catch common cases before e.g. cloning tab
if not forward and not history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
elif forward and not history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
if tab or bg or window:
widget = self.tab_clone(bg, window)
else:
widget = self._current_widget()
try:
if forward:
widget.history.forward(count)
else:
widget.history.back(count)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def back(self, tab=False, bg=False, window=False, count=1):
"""Go back in the history of the current tab.
Args:
tab: Go back in a new tab.
bg: Go back in a background tab.
window: Go back in a new window.
count: How many pages to go back.
"""
self._back_forward(tab, bg, window, count, forward=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def forward(self, tab=False, bg=False, window=False, count=1):
"""Go forward in the history of the current tab.
Args:
tab: Go forward in a new tab.
bg: Go forward in a background tab.
window: Go forward in a new window.
count: How many pages to go forward.
"""
self._back_forward(tab, bg, window, count, forward=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment',
'decrement'])
@cmdutils.argument('count', count=True)
def navigate(self, where: str, tab=False, bg=False, window=False, count=1):
"""Open typical prev/next links or navigate using the URL path.
This tries to automatically click on typical _Previous Page_ or
_Next Page_ links using some heuristics.
Alternatively it can navigate by changing the current URL.
Args:
where: What to open.
- `prev`: Open a _previous_ link.
- `next`: Open a _next_ link.
- `up`: Go up a level in the current URL.
- `increment`: Increment the last number in the URL.
- `decrement`: Decrement the last number in the URL.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
count: For `increment` and `decrement`, the number to change the
URL by. For `up`, the number of levels to go up in the URL.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
cmdutils.check_exclusive((tab, bg, window), 'tbw')
widget = self._current_widget()
url = self._current_url().adjusted(QUrl.RemoveFragment)
handlers = {
'prev': functools.partial(navigate.prevnext, prev=True),
'next': functools.partial(navigate.prevnext, prev=False),
'up': navigate.path_up,
'decrement': functools.partial(navigate.incdec,
inc_or_dec='decrement'),
'increment': functools.partial(navigate.incdec,
inc_or_dec='increment'),
}
try:
if where in ['prev', 'next']:
handler = handlers[where]
handler(browsertab=widget, win_id=self._win_id, baseurl=url,
tab=tab, background=bg, window=window)
elif where in ['up', 'increment', 'decrement']:
new_url = handlers[where](url, count)
self._open(new_url, tab, bg, window, related=True)
else: # pragma: no cover
raise ValueError("Got called with invalid value {} for "
"`where'.".format(where))
except navigate.Error as e:
raise cmdexc.CommandError(e)
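    # Editor's sketch (not part of the original module): the 'up', 'increment'
    # and 'decrement' handlers above are plain URL transformations invoked as
    # handlers[where](url, count). With assumed example URLs, the intent is
    # roughly:
    #   navigate.path_up(QUrl('https://host/a/b/c.html'), 1)
    #       -> 'https://host/a/b/'
    #   navigate.incdec(QUrl('https://host/page/3'), 2, inc_or_dec='increment')
    #       -> 'https://host/page/5'
    # 'prev'/'next' instead scan the page for likely previous/next links and
    # follow them according to the tab/bg/window flags.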
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def scroll_px(self, dx: int, dy: int, count=1):
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: multiplier
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
self._current_widget().scroller.delta(dx, dy)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def scroll(self, direction: typing.Union[str, int], count=1):
"""Scroll the current tab in the given direction.
Note you can use `:run-with-count` to have a keybinding with a bigger
scroll increment.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
count: multiplier
"""
tab = self._current_widget()
funcs = {
'up': tab.scroller.up,
'down': tab.scroller.down,
'left': tab.scroller.left,
'right': tab.scroller.right,
'top': tab.scroller.top,
'bottom': tab.scroller.bottom,
'page-up': tab.scroller.page_up,
'page-down': tab.scroller.page_down,
}
try:
func = funcs[direction]
except KeyError:
expected_values = ', '.join(sorted(funcs))
raise cmdexc.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
if direction in ['top', 'bottom']:
func()
else:
func(count=count)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('horizontal', flag='x')
def scroll_to_perc(self, perc: float = None, horizontal=False, count=None):
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
if perc is None and count is None:
perc = 100
elif count is not None:
perc = count
if horizontal:
x = perc
y = None
else:
x = None
y = perc
self._current_widget().scroller.to_perc(x, y)
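    # Usage note added for illustration: `:scroll-to-perc 50` scrolls the page
    # to 50% vertically, the -x/--horizontal flag applies the percentage
    # horizontally instead, and with neither an argument nor a count the
    # percentage defaults to 100 (scroll to the end). A count overrides the
    # argument.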
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('top_navigate', metavar='ACTION',
choices=('prev', 'decrement'))
@cmdutils.argument('bottom_navigate', metavar='ACTION',
choices=('next', 'increment'))
def scroll_page(self, x: float, y: float, *,
top_navigate: str = None, bottom_navigate: str = None,
count=1):
"""Scroll the frame page-wise.
Args:
x: How many pages to scroll to the right.
y: How many pages to scroll down.
bottom_navigate: :navigate action (next, increment) to run when
scrolling down at the bottom of the page.
top_navigate: :navigate action (prev, decrement) to run when
scrolling up at the top of the page.
count: multiplier
"""
tab = self._current_widget()
if not tab.url().isValid():
# See https://github.com/qutebrowser/qutebrowser/issues/701
return
if bottom_navigate is not None and tab.scroller.at_bottom():
self.navigate(bottom_navigate)
return
elif top_navigate is not None and tab.scroller.at_top():
self.navigate(top_navigate)
return
try:
tab.scroller.delta_page(count * x, count * y)
except OverflowError:
raise cmdexc.CommandError(
"Numeric argument is too large for internal int "
"representation.")
def _yank_url(self, what):
"""Helper method for yank() to get the URL to copy."""
assert what in ['url', 'pretty-url'], what
flags = QUrl.RemovePassword
if what == 'pretty-url':
flags |= QUrl.DecodeReserved
else:
flags |= QUrl.FullyEncoded
url = QUrl(self._current_url())
url_query = QUrlQuery()
url_query_str = urlutils.query_string(url)
if '&' not in url_query_str and ';' in url_query_str:
url_query.setQueryDelimiters('=', ';')
url_query.setQuery(url_query_str)
for key in dict(url_query.queryItems()):
if key in config.val.url.yank_ignored_parameters:
url_query.removeQueryItem(key)
url.setQuery(url_query)
return url.toString(flags)
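    # Illustration of the filtering above (the config value is an assumed
    # example, not taken from this file): with
    #   url.yank_ignored_parameters = ['utm_source', 'utm_medium']
    # a page at 'https://example.com/?utm_source=x&q=1' would be yanked as
    # 'https://example.com/?q=1'. The ';' query-delimiter branch only applies
    # when the query string separates pairs with ';' rather than '&'.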
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('what', choices=['selection', 'url', 'pretty-url',
'title', 'domain'])
def yank(self, what='url', sel=False, keep=False):
"""Yank something to the clipboard or primary selection.
Args:
what: What to yank.
- `url`: The current URL.
- `pretty-url`: The URL in pretty decoded form.
- `title`: The current page's title.
- `domain`: The current scheme, domain, and port number.
- `selection`: The selection under the cursor.
sel: Use the primary selection instead of the clipboard.
keep: Stay in visual mode after yanking the selection.
"""
if what == 'title':
s = self._tabbed_browser.page_title(self._current_index())
elif what == 'domain':
port = self._current_url().port()
s = '{}://{}{}'.format(self._current_url().scheme(),
self._current_url().host(),
':' + str(port) if port > -1 else '')
elif what in ['url', 'pretty-url']:
s = self._yank_url(what)
what = 'URL' # For printing
elif what == 'selection':
caret = self._current_widget().caret
s = caret.selection()
if not caret.has_selection() or not s:
message.info("Nothing to yank")
return
else: # pragma: no cover
raise ValueError("Invalid value {!r} for `what'.".format(what))
if sel and utils.supports_selection():
target = "primary selection"
else:
sel = False
target = "clipboard"
utils.set_clipboard(s, selection=sel)
if what != 'selection':
message.info("Yanked {} to {}: {}".format(what, target, s))
else:
message.info("{} {} yanked to {}".format(
len(s), "char" if len(s) == 1 else "chars", target))
if not keep:
modeman.leave(self._win_id, KeyMode.caret, "yank selected",
maybe=True)
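    # For `what='domain'`, a current URL of 'https://example.com:8080/x?q=1'
    # (assumed example) yields 'https://example.com:8080'; the ':port' suffix
    # is dropped when QUrl reports no explicit port (port() == -1).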
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_in(self, count=1):
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_out(self, count=1):
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(-count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom(self, zoom=None, count=None):
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom. If both are given,
use [count].
Args:
zoom: The zoom percentage to set.
count: The zoom percentage to set.
"""
if zoom is not None:
try:
zoom = int(zoom.rstrip('%'))
except ValueError:
raise cmdexc.CommandError("zoom: Invalid int value {}"
.format(zoom))
level = count if count is not None else zoom
if level is None:
level = config.val.zoom.default
tab = self._current_widget()
try:
tab.zoom.set_factor(float(level) / 100)
except ValueError:
raise cmdexc.CommandError("Can't zoom {}%!".format(level))
message.info("Zoom level: {}%".format(int(level)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_only(self, prev=False, next_=False, force=False):
"""Close all tabs except for the current one.
Args:
prev: Keep tabs before the current.
next_: Keep tabs after the current.
force: Avoid confirmation for pinned tabs.
"""
cmdutils.check_exclusive((prev, next_), 'pn')
cur_idx = self._tabbed_browser.currentIndex()
assert cur_idx != -1
def _to_close(i):
"""Helper method to check if a tab should be closed or not."""
return not (i == cur_idx or
(prev and i < cur_idx) or
(next_ and i > cur_idx))
# Check to see if we are closing any pinned tabs
if not force:
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i) and tab.data.pinned:
self._tabbed_browser.tab_close_prompt_if_pinned(
tab,
force,
lambda: self.tab_only(
prev=prev, next_=next_, force=True))
return
first_tab = True
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i):
self._tabbed_browser.close_tab(tab, new_undo=first_tab)
first_tab = False
@cmdutils.register(instance='command-dispatcher', scope='window')
def undo(self):
"""Re-open the last closed tab or tabs."""
try:
self._tabbed_browser.undo()
except IndexError:
raise cmdexc.CommandError("Nothing to undo!")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_prev(self, count=1):
"""Switch to the previous tab, or switch [count] tabs back.
Args:
count: How many tabs to switch back.
"""
if self._count() == 0:
# Running :tab-prev after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() - count
if newidx >= 0:
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("First tab")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_next(self, count=1):
"""Switch to the next tab, or switch [count] tabs forward.
Args:
count: How many tabs to switch forward.
"""
if self._count() == 0:
# Running :tab-next after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() + count
if newidx < self._count():
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("Last tab")
def _resolve_buffer_index(self, index):
"""Resolve a buffer index to the tabbedbrowser and tab.
Args:
index: The [win_id/]index of the tab to be selected. Or a substring
in which case the closest match will be focused.
"""
index_parts = index.split('/', 1)
try:
for part in index_parts:
int(part)
except ValueError:
model = miscmodels.buffer()
model.set_pattern(index)
if model.count() > 0:
index = model.data(model.first_item())
index_parts = index.split('/', 1)
else:
raise cmdexc.CommandError(
"No matching tab for: {}".format(index))
if len(index_parts) == 2:
win_id = int(index_parts[0])
idx = int(index_parts[1])
elif len(index_parts) == 1:
idx = int(index_parts[0])
active_win = objreg.get('app').activeWindow()
if active_win is None:
# Not sure how you enter a command without an active window...
raise cmdexc.CommandError(
"No window specified and couldn't find active window!")
win_id = active_win.win_id
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if not 0 < idx <= tabbed_browser.count():
raise cmdexc.CommandError(
"There's no tab with index {}!".format(idx))
return (tabbed_browser, tabbed_browser.widget(idx-1))
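    # Hypothetical resolutions of the index syntax handled above (window ids
    # and tab counts are made up for illustration):
    #   '2/3'   -> window 2, third tab (both parts numeric)
    #   '4'     -> fourth tab of the active window
    #   'gith'  -> closest title/URL match from miscmodels.buffer()
    # User-visible indices are 1-based, hence the `idx - 1` when fetching the
    # widget.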
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('index', completion=miscmodels.buffer)
@cmdutils.argument('count', count=True)
def buffer(self, index=None, count=None):
"""Select tab by index or url/title best match.
Focuses window if necessary when index is given. If both index and
count are given, use count.
Args:
index: The [win_id/]index of the tab to focus. Or a substring
in which case the closest match will be focused.
count: The tab index to focus, starting with 1.
"""
if count is None and index is None:
raise cmdexc.CommandError("buffer: Either a count or the argument "
"index must be specified.")
if count is not None:
index = str(count)
tabbed_browser, tab = self._resolve_buffer_index(index)
window = tabbed_browser.window()
window.activateWindow()
window.raise_()
tabbed_browser.setCurrentWidget(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['last'])
@cmdutils.argument('count', count=True)
def tab_focus(self, index: typing.Union[str, int] = None, count=None):
"""Select the tab given as argument/[count].
If neither count nor index are given, it behaves like tab-next.
If both are given, use count.
Args:
index: The tab index to focus, starting with 1. The special value
`last` focuses the last focused tab (regardless of count).
Negative indices count from the end, such that -1 is the
last tab.
count: The tab index to focus, starting with 1.
"""
index = count if count is not None else index
if index == 'last':
self._tab_focus_last()
return
elif index == self._current_index() + 1:
self._tab_focus_last(show_error=False)
return
elif index is None:
self.tab_next()
return
if index < 0:
index = self._count() + index + 1
if 1 <= index <= self._count():
self._set_current_index(index - 1)
else:
raise cmdexc.CommandError("There's no tab with index {}!".format(
index))
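    # Worked example for the negative indices above (tab count assumed to be
    # 4): `:tab-focus -1` computes 4 + (-1) + 1 = 4 and focuses the last tab,
    # while `:tab-focus -4` resolves to index 1, the first tab.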
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['+', '-'])
@cmdutils.argument('count', count=True)
def tab_move(self, index: typing.Union[str, int] = None, count=None):
"""Move the current tab according to the argument and [count].
If neither is given, move it to the first position.
Args:
            index: `+` or `-` to move relative to the current tab by
                   count positions (default: 1), or an absolute tab
                   index to move to.
count: If moving relatively: Offset.
If moving absolutely: New position (default: 0). This
overrides the index argument, if given.
"""
if index in ['+', '-']:
# relative moving
new_idx = self._current_index()
delta = 1 if count is None else count
if index == '-':
new_idx -= delta
elif index == '+': # pragma: no branch
new_idx += delta
if config.val.tabs.wrap:
new_idx %= self._count()
else:
# absolute moving
if count is not None:
new_idx = count - 1
elif index is not None:
new_idx = index - 1 if index >= 0 else index + self._count()
else:
new_idx = 0
if not 0 <= new_idx < self._count():
raise cmdexc.CommandError("Can't move tab to position {}!".format(
new_idx + 1))
cur_idx = self._current_index()
cmdutils.check_overflow(cur_idx, 'int')
cmdutils.check_overflow(new_idx, 'int')
self._tabbed_browser.tabBar().moveTab(cur_idx, new_idx)
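    # Worked example for the relative branch above (assumed state): with 5
    # tabs, current index 4 (0-based) and tabs.wrap enabled, `:tab-move +`
    # with a count of 3 gives new_idx = (4 + 3) % 5 = 2. Absolute moves are
    # 1-based from the user's side, hence the `count - 1` / `index - 1`
    # adjustments, and a negative absolute index counts from the end.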
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_replace_variables=True)
def spawn(self, cmdline, userscript=False, verbose=False,
output=False, detach=False):
"""Spawn a command in a shell.
Args:
userscript: Run the command as a userscript. You can use an
absolute path, or store the userscript in one of those
locations:
- `~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`)
- `/usr/share/qutebrowser/userscripts`
verbose: Show notifications when the command started/exited.
output: Whether the output should be shown in a new tab.
detach: Whether the command should be detached from qutebrowser.
cmdline: The commandline to execute.
"""
cmdutils.check_exclusive((userscript, detach), 'ud')
try:
cmd, *args = shlex.split(cmdline)
except ValueError as e:
raise cmdexc.CommandError("Error while splitting command: "
"{}".format(e))
args = runners.replace_variables(self._win_id, args)
log.procs.debug("Executing {} with args {}, userscript={}".format(
cmd, args, userscript))
if userscript:
# ~ expansion is handled by the userscript module.
self._run_userscript(cmd, *args, verbose=verbose)
else:
cmd = os.path.expanduser(cmd)
proc = guiprocess.GUIProcess(what='command', verbose=verbose,
parent=self._tabbed_browser)
if detach:
proc.start_detached(cmd, args)
else:
proc.start(cmd, args)
if output:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window='last-focused')
tabbed_browser.openurl(QUrl('qute://spawn-output'), newtab=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
def home(self):
"""Open main startpage in current tab."""
self.openurl(config.val.url.start_pages[0])
def _run_userscript(self, cmd, *args, verbose=False):
"""Run a userscript given as argument.
Args:
cmd: The userscript to run.
args: Arguments to pass to the userscript.
verbose: Show notifications when the command started/exited.
"""
env = {
'QUTE_MODE': 'command',
}
idx = self._current_index()
if idx != -1:
env['QUTE_TITLE'] = self._tabbed_browser.page_title(idx)
tab = self._tabbed_browser.currentWidget()
if tab is not None and tab.caret.has_selection():
env['QUTE_SELECTED_TEXT'] = tab.caret.selection()
try:
env['QUTE_SELECTED_HTML'] = tab.caret.selection(html=True)
except browsertab.UnsupportedOperationError:
pass
# FIXME:qtwebengine: If tab is None, run_async will fail!
try:
url = self._tabbed_browser.current_url()
except qtutils.QtValueError:
pass
else:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
try:
userscripts.run_async(tab, cmd, *args, win_id=self._win_id,
env=env, verbose=verbose)
except userscripts.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def quickmark_save(self):
"""Save the current page as a quickmark."""
quickmark_manager = objreg.get('quickmark-manager')
quickmark_manager.prompt_save(self._current_url())
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_load(self, name, tab=False, bg=False, window=False):
"""Load a quickmark.
Args:
name: The name of the quickmark to load.
tab: Load the quickmark in a new tab.
bg: Load the quickmark in a new background tab.
window: Load the quickmark in a new window.
"""
try:
url = objreg.get('quickmark-manager').get(name)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_del(self, name=None):
"""Delete a quickmark.
Args:
name: The name of the quickmark to delete. If not given, delete the
quickmark for the current page (choosing one arbitrarily
if there are more than one).
"""
quickmark_manager = objreg.get('quickmark-manager')
if name is None:
url = self._current_url()
try:
name = quickmark_manager.get_by_qurl(url)
except urlmarks.DoesNotExistError as e:
raise cmdexc.CommandError(str(e))
try:
quickmark_manager.delete(name)
except KeyError:
raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url.
If no url and title are provided, then save the current page as a
bookmark.
If a url and title have been provided, then save the given url as
a bookmark with the provided title.
You can view all saved bookmarks on the
link:qute://bookmarks[bookmarks page].
Args:
url: url to save as a bookmark. If None, use url of current page.
title: title of the new bookmark.
toggle: remove the bookmark instead of raising an error if it
already exists.
"""
if url and not title:
raise cmdexc.CommandError('Title must be provided if url has '
'been provided')
bookmark_manager = objreg.get('bookmark-manager')
if url is None:
url = self._current_url()
else:
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
if not title:
title = self._current_title()
try:
was_added = bookmark_manager.add(url, title, toggle=toggle)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
else:
msg = "Bookmarked {}" if was_added else "Removed bookmark {}"
message.info(msg.format(url.toDisplayString()))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_load(self, url, tab=False, bg=False, window=False,
delete=False):
"""Load a bookmark.
Args:
url: The url of the bookmark to load.
tab: Load the bookmark in a new tab.
bg: Load the bookmark in a new background tab.
window: Load the bookmark in a new window.
delete: Whether to delete the bookmark afterwards.
"""
try:
qurl = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(qurl, tab, bg, window)
if delete:
self.bookmark_del(url)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_del(self, url=None):
"""Delete a bookmark.
Args:
url: The url of the bookmark to delete. If not given, use the
current page's url.
"""
if url is None:
url = self._current_url().toString(QUrl.RemovePassword |
QUrl.FullyEncoded)
try:
objreg.get('bookmark-manager').delete(url)
except KeyError:
raise cmdexc.CommandError("Bookmark '{}' not found!".format(url))
@cmdutils.register(instance='command-dispatcher', scope='window')
def follow_selected(self, *, tab=False):
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
try:
self._current_widget().caret.follow_selected(tab=tab)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', name='inspector',
scope='window')
def toggle_inspector(self):
"""Toggle the web inspector.
        Note: Due to a bug in Qt, the inspector will show incorrect request
headers in the network tab.
"""
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
page = tab._widget.page() # pylint: disable=protected-access
try:
if tab.data.inspector is None:
tab.data.inspector = inspector.create()
tab.data.inspector.inspect(page)
else:
tab.data.inspector.toggle(page)
except inspector.WebInspectorError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def download(self, url=None, *, mhtml_=False, dest=None):
"""Download a given URL, or current page if no URL given.
Args:
url: The URL to download. If not given, download the current page.
dest: The file path to write the download to, or None to ask.
mhtml_: Download the current page and all assets as mhtml file.
"""
# FIXME:qtwebengine do this with the QtWebEngine download manager?
download_manager = objreg.get('qtnetwork-download-manager',
scope='window', window=self._win_id)
target = None
if dest is not None:
dest = downloads.transform_path(dest)
if dest is None:
raise cmdexc.CommandError("Invalid target filename")
target = downloads.FileDownloadTarget(dest)
tab = self._current_widget()
user_agent = tab.user_agent()
if url:
if mhtml_:
raise cmdexc.CommandError("Can only download the current page"
" as mhtml.")
url = urlutils.qurl_from_user_input(url)
urlutils.raise_cmdexc_if_invalid(url)
download_manager.get(url, user_agent=user_agent, target=target)
elif mhtml_:
tab = self._current_widget()
if tab.backend == usertypes.Backend.QtWebEngine:
webengine_download_manager = objreg.get(
'webengine-download-manager')
try:
webengine_download_manager.get_mhtml(tab, target)
except browsertab.UnsupportedOperationError as e:
raise cmdexc.CommandError(e)
else:
download_manager.get_mhtml(tab, target)
else:
qnam = tab.networkaccessmanager()
suggested_fn = downloads.suggested_fn_from_title(
self._current_url().path(), tab.title()
)
download_manager.get(
self._current_url(),
user_agent=user_agent,
qnam=qnam,
target=target,
suggested_fn=suggested_fn
)
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self):
"""Show the source of the current page in a new tab."""
tab = self._current_widget()
if tab.data.viewing_source:
raise cmdexc.CommandError("Already viewing source!")
try:
current_url = self._current_url()
except cmdexc.CommandError as e:
message.error(str(e))
return
def show_source_cb(source):
"""Show source as soon as it's ready."""
# WORKAROUND for https://github.com/PyCQA/pylint/issues/491
# pylint: disable=no-member
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(
full=True, linenos='table',
title='Source for {}'.format(current_url.toDisplayString()))
# pylint: enable=no-member
highlighted = pygments.highlight(source, lexer, formatter)
new_tab = self._tabbed_browser.tabopen()
new_tab.set_html(highlighted)
new_tab.data.viewing_source = True
tab.dump_async(show_source_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_dump_page(self, dest, plain=False):
"""Dump the current page's content to a file.
Args:
dest: Where to write the file to.
plain: Write plain text instead of HTML.
"""
tab = self._current_widget()
dest = os.path.expanduser(dest)
def callback(data):
"""Write the data to disk."""
try:
with open(dest, 'w', encoding='utf-8') as f:
f.write(data)
except OSError as e:
message.error('Could not write page: {}'.format(e))
else:
message.info("Dumped page to {}.".format(dest))
tab.dump_async(callback, plain=plain)
@cmdutils.register(instance='command-dispatcher', scope='window')
def history(self, tab=True, bg=False, window=False):
"""Show browsing history.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
url = QUrl('qute://history/')
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', name='help',
scope='window')
@cmdutils.argument('topic', completion=miscmodels.helptopic)
def show_help(self, tab=False, bg=False, window=False, topic=None):
r"""Show help about a command or setting.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
topic: The topic to show help for.
- :__command__ for commands.
- __section__.__option__ for settings.
"""
if topic is None:
path = 'index.html'
elif topic.startswith(':'):
command = topic[1:]
if command not in cmdutils.cmd_dict:
raise cmdexc.CommandError("Invalid command {}!".format(
command))
path = 'commands.html#{}'.format(command)
elif topic in configdata.DATA:
path = 'settings.html#{}'.format(topic)
else:
raise cmdexc.CommandError("Invalid help topic {}!".format(topic))
url = QUrl('qute://help/{}'.format(path))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def messages(self, level='info', plain=False, tab=False, bg=False,
window=False):
"""Show a log of past messages.
Args:
level: Include messages with `level` or higher severity.
Valid values: vdebug, debug, info, warning, error, critical.
plain: Whether to show plaintext (as opposed to html).
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
if level.upper() not in log.LOG_LEVELS:
raise cmdexc.CommandError("Invalid log level {}!".format(level))
if plain:
url = QUrl('qute://plainlog?level={}'.format(level))
else:
url = QUrl('qute://log?level={}'.format(level))
self._open(url, tab, bg, window)
def _open_editor_cb(self, elem):
"""Open editor after the focus elem was found in open_editor."""
if elem is None:
message.error("No element focused!")
return
if not elem.is_editable(strict=True):
message.error("Focused element is not editable!")
return
text = elem.value()
if text is None:
message.error("Could not get text from the focused element.")
return
assert isinstance(text, str), text
caret_position = elem.caret_position()
ed = editor.ExternalEditor(self._tabbed_browser)
ed.editing_finished.connect(functools.partial(
self.on_editing_finished, elem))
ed.edit(text, caret_position)
@cmdutils.register(instance='command-dispatcher', scope='window')
def open_editor(self):
"""Open an external editor with the currently selected form field.
The editor which should be launched can be configured via the
`editor.command` config option.
"""
tab = self._current_widget()
tab.elements.find_focused(self._open_editor_cb)
def on_editing_finished(self, elem, text):
"""Write the editor text into the form field and clean up tempfile.
Callback for GUIProcess when the editor was closed.
Args:
elem: The WebElementWrapper which was modified.
text: The new text to insert.
"""
try:
elem.set_value(text)
        except webelem.OrphanedError:
message.error('Edited element vanished')
except webelem.Error as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', maxsplit=0,
scope='window')
def insert_text(self, text):
"""Insert text at cursor position.
Args:
text: The text to insert.
"""
tab = self._current_widget()
def _insert_text_cb(elem):
if elem is None:
message.error("No element focused!")
return
try:
elem.insert_text(text)
except webelem.Error as e:
message.error(str(e))
return
tab.elements.find_focused(_insert_text_cb)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('filter_', choices=['id'])
def click_element(self, filter_: str, value, *,
target: usertypes.ClickTarget =
usertypes.ClickTarget.normal,
force_event=False):
"""Click the element matching the given filter.
        The given filter needs to result in exactly one element; otherwise an
        error is shown.
Args:
filter_: How to filter the elements.
id: Get an element based on its ID.
value: The value to filter for.
target: How to open the clicked element (normal/tab/tab-bg/window).
force_event: Force generating a fake click event.
"""
tab = self._current_widget()
def single_cb(elem):
"""Click a single element."""
if elem is None:
message.error("No element found with id {}!".format(value))
return
try:
elem.click(target, force_event=force_event)
except webelem.Error as e:
message.error(str(e))
return
# def multiple_cb(elems):
# """Click multiple elements (with only one expected)."""
# if not elems:
# message.error("No element found!")
# return
# elif len(elems) != 1:
# message.error("{} elements found!".format(len(elems)))
# return
# elems[0].click(target)
handlers = {
'id': (tab.elements.find_id, single_cb),
}
handler, callback = handlers[filter_]
handler(value, callback)
def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev):
"""Callback called from search/search_next/search_prev.
Args:
found: Whether the text was found.
tab: The AbstractTab in which the search was made.
old_scroll_pos: The scroll position (QPoint) before the search.
options: The options (dict) the search was made with.
text: The text searched for.
prev: Whether we're searching backwards (i.e. :search-prev)
"""
# :search/:search-next without reverse -> down
# :search/:search-next with reverse -> up
# :search-prev without reverse -> up
# :search-prev with reverse -> down
going_up = options['reverse'] ^ prev
if found:
# Check if the scroll position got smaller and show info.
if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y():
message.info("Search hit BOTTOM, continuing at TOP")
elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y():
message.info("Search hit TOP, continuing at BOTTOM")
else:
message.warning("Text '{}' not found on page!".format(text),
replace=True)
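    # Truth table for the XOR above, matching the comment block:
    #   reverse=False, prev=False -> going_up=False  (:search / :search-next)
    #   reverse=False, prev=True  -> going_up=True   (:search-prev)
    #   reverse=True,  prev=False -> going_up=True
    #   reverse=True,  prev=True  -> going_up=False
    # Wrapping is then reported purely from the scroll position moving against
    # that direction after a successful search.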
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def search(self, text="", reverse=False):
"""Search for a text on the current page. With no text, clear results.
Args:
text: The text to search for.
reverse: Reverse search direction.
"""
self.set_mark("'")
tab = self._current_widget()
if tab.search.search_displayed:
tab.search.clear()
if not text:
return
options = {
'ignore_case': config.val.search.ignore_case,
'reverse': reverse,
}
self._tabbed_browser.search_text = text
self._tabbed_browser.search_options = dict(options)
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=options, text=text, prev=False)
options['result_cb'] = cb
tab.search.search(text, **options)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def search_next(self, count=1):
"""Continue the search to the ([count]th) next term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=False)
for _ in range(count - 1):
tab.search.next_result()
tab.search.next_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def search_prev(self, count=1):
"""Continue the search to the ([count]th) previous term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=True)
for _ in range(count - 1):
tab.search.prev_result()
tab.search.prev_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_line(self, count=1):
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_line(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_line(self, count=1):
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_prev_line(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_char(self, count=1):
"""Move the cursor or selection to the next char.
Args:
            count: How many chars to move.
"""
self._current_widget().caret.move_to_next_char(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_char(self, count=1):
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_prev_char(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_word(self, count=1):
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_end_of_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_word(self, count=1):
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_next_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_word(self, count=1):
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_prev_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_start_of_line(self):
"""Move the cursor or selection to the start of the line."""
self._current_widget().caret.move_to_start_of_line()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_end_of_line(self):
"""Move the cursor or selection to the end of line."""
self._current_widget().caret.move_to_end_of_line()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_next_block(self, count=1):
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_prev_block(self, count=1):
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_next_block(self, count=1):
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_prev_block(self, count=1):
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_start_of_document(self):
"""Move the cursor or selection to the start of the document."""
self._current_widget().caret.move_to_start_of_document()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_end_of_document(self):
"""Move the cursor or selection to the end of the document."""
self._current_widget().caret.move_to_end_of_document()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def toggle_selection(self):
"""Toggle caret selection mode."""
self._current_widget().caret.toggle_selection()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def drop_selection(self):
"""Drop selection and keep selection mode enabled."""
self._current_widget().caret.drop_selection()
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
@cmdutils.argument('count', count=True)
def debug_webaction(self, action, count=1):
"""Execute a webaction.
Available actions:
http://doc.qt.io/archives/qt-5.5/qwebpage.html#WebAction-enum (WebKit)
http://doc.qt.io/qt-5/qwebenginepage.html#WebAction-enum (WebEngine)
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
tab = self._current_widget()
for _ in range(count):
try:
tab.action.run_string(action)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_cmd_split=True)
def jseval(self, js_code, file=False, quiet=False, *,
world: typing.Union[usertypes.JsWorld, int] = None):
"""Evaluate a JavaScript string.
Args:
js_code: The string/file to evaluate.
file: Interpret js-code as a path to a file.
If the path is relative, the file is searched in a js/ subdir
in qutebrowser's data dir, e.g.
`~/.local/share/qutebrowser/js`.
quiet: Don't show resulting JS object.
world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to
run the snippet in.
"""
if world is None:
world = usertypes.JsWorld.jseval
if quiet:
jseval_cb = None
else:
def jseval_cb(out):
"""Show the data returned from JS."""
if out is None:
# Getting the actual error (if any) seems to be difficult.
# The error does end up in
# BrowserPage.javaScriptConsoleMessage(), but
# distinguishing between :jseval errors and errors from the
# webpage is not trivial...
message.info('No output or error')
else:
# The output can be a string, number, dict, array, etc. But
# *don't* output too much data, as this will make
# qutebrowser hang
out = str(out)
if len(out) > 5000:
out = out[:5000] + ' [...trimmed...]'
message.info(out)
if file:
path = os.path.expanduser(js_code)
if not os.path.isabs(path):
path = os.path.join(standarddir.data(), 'js', path)
try:
with open(path, 'r', encoding='utf-8') as f:
js_code = f.read()
except OSError as e:
raise cmdexc.CommandError(str(e))
widget = self._current_widget()
widget.run_js_async(js_code, callback=jseval_cb, world=world)
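    # Path handling example for `file=True` above: a relative argument such as
    # 'foo.js' (assumed name) is looked up as
    # os.path.join(standarddir.data(), 'js', 'foo.js'), i.e. typically
    # ~/.local/share/qutebrowser/js/foo.js, before being read and evaluated.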
@cmdutils.register(instance='command-dispatcher', scope='window')
def fake_key(self, keystring, global_=False):
"""Send a fake keypress or key string to the website or qutebrowser.
:fake-key xy - sends the keychain 'xy'
:fake-key <Ctrl-x> - sends Ctrl-x
:fake-key <Escape> - sends the escape key
Args:
keystring: The keystring to send.
global_: If given, the keys are sent to the qutebrowser UI.
"""
try:
keyinfos = utils.parse_keystring(keystring)
except utils.KeyParseError as e:
raise cmdexc.CommandError(str(e))
for keyinfo in keyinfos:
press_event = QKeyEvent(QEvent.KeyPress, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
release_event = QKeyEvent(QEvent.KeyRelease, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
if global_:
window = QApplication.focusWindow()
if window is None:
raise cmdexc.CommandError("No focused window!")
QApplication.postEvent(window, press_event)
QApplication.postEvent(window, release_event)
else:
tab = self._current_widget()
tab.send_event(press_event)
tab.send_event(release_event)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True, backend=usertypes.Backend.QtWebKit)
def debug_clear_ssl_errors(self):
"""Clear remembered SSL error answers."""
self._current_widget().clear_ssl_errors()
@cmdutils.register(instance='command-dispatcher', scope='window')
def edit_url(self, url=None, bg=False, tab=False, window=False,
private=False, related=False):
"""Navigate to a url formed in an external editor.
The editor which should be launched can be configured via the
`editor.command` config option.
Args:
url: URL to edit; defaults to the current page url.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
private: Open a new window in private browsing mode.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
"""
cmdutils.check_exclusive((tab, bg, window), 'tbw')
old_url = self._current_url().toString()
ed = editor.ExternalEditor(self._tabbed_browser)
# Passthrough for openurl args (e.g. -t, -b, -w)
ed.editing_finished.connect(functools.partial(
self._open_if_changed, old_url=old_url, bg=bg, tab=tab,
window=window, private=private, related=related))
ed.edit(url or old_url)
@cmdutils.register(instance='command-dispatcher', scope='window')
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.set_mark(key)
@cmdutils.register(instance='command-dispatcher', scope='window')
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key)
def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False,
window=False, private=False, related=False):
"""Open a URL unless it's already open in the tab.
Args:
old_url: The original URL to compare against.
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
private: Open a new window in private browsing mode.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
"""
if bg or tab or window or private or related or url != old_url:
self.openurl(url=url, bg=bg, tab=tab, window=window,
private=private, related=related)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fullscreen(self, leave=False):
"""Toggle fullscreen mode.
Args:
leave: Only leave fullscreen if it was entered by the page.
"""
if leave:
tab = self._current_widget()
try:
tab.action.exit_fullscreen()
except browsertab.UnsupportedOperationError:
pass
return
window = self._tabbed_browser.window()
if window.isFullScreen():
window.setWindowState(
window.state_before_fullscreen & ~Qt.WindowFullScreen)
else:
window.state_before_fullscreen = window.windowState()
window.showFullScreen()
log.misc.debug('state before fullscreen: {}'.format(
debug.qflags_key(Qt, window.state_before_fullscreen)))
| 1 | 20,201 | Just a note to myself: After merging this, I should edit the line after this one to use `message.error`, as raising a `CommandError` from here seems wrong! | qutebrowser-qutebrowser | py |
@@ -1010,8 +1010,6 @@ GLOBAL_LABEL(FUNCNAME:)
mov TEST_REG2_ASM, 1
/* xax statelessly restored here. */
mov TEST_REG2_ASM, 2
- /* xax is dead, so initial aflags spill should not use slot. */
- mov REG_XAX, 0
jmp test28_done
test28_done:
        /* Fail if aflags were not restored correctly. */ | 1 |
/* **********************************************************
* Copyright (c) 2015-2021 Google, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of Google, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* clang-format off */
/* XXX: clang-format incorrectly detected a tab difference at "clang-format on"
* below. This is why "clang-format off" has been moved outside the ifdef until
* bug is fixed.
*/
#ifndef ASM_CODE_ONLY /* C code */
# include "tools.h"
# include "drreg-test-shared.h"
# include <setjmp.h>
/* asm routines */
void
test_asm();
void
test_asm_fault_restore_gpr();
void
test_asm_fault_restore_aflags_in_slot();
void
test_asm_fault_restore_ignore_3rd_dr_tls_slot();
void
test_asm_fault_restore_non_public_dr_slot();
void
test_asm_fault_restore_non_public_dr_slot_rip_rel_addr_in_reg();
void
test_asm_fault_restore_multi_phase_gpr_nested_spill_regions();
void
test_asm_fault_restore_aflags_in_xax();
void
test_asm_fault_restore_gpr_restored_for_read();
void
test_asm_fault_restore_multi_phase_gpr_overlapping_spill_regions();
void
test_asm_fault_restore_gpr_store_xl8();
void
test_asm_fault_restore_faux_gpr_spill();
void
test_asm_fault_restore_multi_phase_native_gpr_spilled_twice();
void
test_asm_fault_restore_multi_phase_aflags_nested_spill_regions();
void
test_asm_fault_restore_multi_phase_aflags_overlapping_spill_regions();
void
test_asm_fault_restore_aflags_restored_for_read();
void
test_asm_fault_restore_multi_phase_native_aflags_spilled_twice();
void
test_asm_fault_restore_aflags_in_slot_store_xl8();
void
test_asm_fault_restore_aflags_in_xax_store_xl8();
void
test_asm_fault_restore_aflags_xax_already_spilled();
void
test_asm_fault_restore_gpr_spilled_to_mcontext_later();
void
test_asm_fault_restore_aflags_spilled_to_mcontext_later();
void
test_asm_fault_restore_gpr_spilled_during_clean_call_later();
void
test_asm_fault_restore_aflags_spilled_during_clean_call_later();
void
test_asm_fault_restore_gpr_spilled_to_mcontext_between();
void
test_asm_fault_restore_aflags_spilled_to_mcontext_between();
void
test_asm_fault_restore_multi_phase_gpr_nested_spill_regions_insertion_outer();
void
test_asm_fault_restore_multi_phase_aflags_nested_spill_regions_insertion_outer();
static SIGJMP_BUF mark;
# if defined(UNIX)
# include <signal.h>
static void
handle_signal_test_asm(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
print("ERROR: did not expect any signal!\n");
SIGLONGJMP(mark, 1);
}
static void
handle_signal_gpr_aflags_in_slot(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_3_C)
print("ERROR: spilled register value was not preserved!\n");
} else if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_ignore_3rd_slot(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_7_C)
print("ERROR: spilled register value was not preserved!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_non_public_slot(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
# ifdef X86
if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->SC_XAX != DRREG_TEST_9_C) {
print("ERROR: spilled register value was not preserved!\n");
exit(1);
}
}
# endif
SIGLONGJMP(mark, 1);
}
static void
handle_signal_non_public_slot_rip_rel(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
# ifdef X86
if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->SC_XAX != DRREG_TEST_11_C) {
print("ERROR: spilled register value was not preserved!\n");
exit(1);
}
}
# endif
SIGLONGJMP(mark, 1);
}
static void
handle_signal_multi_phase_gpr(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_14_C)
print("ERROR: spilled register value was not preserved in test #14!\n");
} else if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_17_C)
print("ERROR: spilled register value was not preserved in test #17!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_aflags_xax_gpr_read(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #15!\n");
} else if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_16_C)
print("ERROR: spilled register value was not preserved in test #16!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_gpr_xl8_faux_gpr_spill(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_18_C)
print("ERROR: spilled register value was not preserved in test #18!\n");
} else if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_19_C)
print("ERROR: spilled register value was not preserved in test #19!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_gpr_multi_spill_aflags_nested(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_20_C)
print("ERROR: spilled register value was not preserved in test #20!\n");
} else if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #21!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_multi_phase_aflags_overlapping(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #23!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_aflags_read(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #24!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_aflags_multi_spill(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #25!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_aflags_xl8(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #26!\n");
} else if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #27!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_aflags_xax_already_spilled(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #29!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_spilled_to_mcontext_later(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_30_C)
print("ERROR: spilled register value was not preserved in test #30!\n");
} else if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #31!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_spilled_during_clean_call_later(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_CLEAN_CALL_MCONTEXT_SIG != DRREG_TEST_32_C)
print("ERROR: spilled register value was not preserved in test #32!\n");
} else if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #33!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_spilled_to_mcontext_between(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_34_C)
print("ERROR: spilled register value was not preserved in test #34!\n");
} else if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #35!\n");
}
SIGLONGJMP(mark, 1);
}
static void
handle_signal_nested_gpr_aflags_spill_insertion_outer(int signal, siginfo_t *siginfo, ucontext_t *ucxt)
{
if (signal == SIGILL) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (sc->TEST_REG_SIG != DRREG_TEST_36_C)
print("ERROR: spilled register value was not preserved in test #36!\n");
} else if (signal == SIGSEGV) {
sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt);
if (!TESTALL(DRREG_TEST_AFLAGS_C, sc->TEST_FLAGS_SIG))
print("ERROR: spilled flags value was not preserved in test #37!\n");
}
SIGLONGJMP(mark, 1);
}
# elif defined(WINDOWS)
# include <windows.h>
static LONG WINAPI
handle_exception_test_asm(struct _EXCEPTION_POINTERS *ep)
{
print("ERROR: did not expect any signal!\n");
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_gpr_aflags_in_slot(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_REG_CXT != DRREG_TEST_3_C)
print("ERROR: spilled register value was not preserved!\n");
} else if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_ignore_3rd_slot(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_REG_CXT != DRREG_TEST_7_C)
print("ERROR: spilled register value was not preserved!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_non_public_slot(struct _EXCEPTION_POINTERS *ep)
{
# ifdef X86
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_XAX_CXT != DRREG_TEST_9_C)
print("ERROR: spilled register value was not preserved!\n");
}
# endif
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_non_public_slot_rip_rel(struct _EXCEPTION_POINTERS *ep)
{
# ifdef X86
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_XAX_CXT != DRREG_TEST_11_C)
print("ERROR: spilled register value was not preserved!\n");
}
# endif
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_multi_phase_gpr(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_REG_CXT != DRREG_TEST_14_C)
print("ERROR: spilled register value was not preserved!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_aflags_xax_gpr_read(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #15!\n");
    } else if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (ep->ContextRecord->TEST_REG_CXT != DRREG_TEST_16_C)
print("ERROR: spilled register value was not preserved in test #16!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_gpr_xl8_faux_gpr_spill(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_REG_CXT != DRREG_TEST_18_C)
print("ERROR: spilled register value was not preserved in test #18!\n");
} else if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (ep->ContextRecord->TEST_REG_CXT != DRREG_TEST_19_C)
print("ERROR: spilled register value was not preserved in test #19!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_gpr_multi_spill_aflags_nested(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_REG_CXT != DRREG_TEST_20_C)
print("ERROR: spilled register value was not preserved in test #20!\n");
} else if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #21!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_multi_phase_aflags_overlapping(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #23!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_aflags_read(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #24!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_aflags_multi_spill(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #25!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_aflags_xl8(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #26!\n");
} else if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #27!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_aflags_xax_already_spilled(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #29!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_spilled_to_mcontext_later(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_REG_CXT != DRREG_TEST_30_C)
print("ERROR: spilled register value was not preserved in test #30!\n");
} else if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #31!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_spilled_during_clean_call_later(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_REG_CLEAN_CALL_MCONTEXT_CXT != DRREG_TEST_32_C)
print("ERROR: spilled register value was not preserved in test #32!\n");
} else if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #33!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_spilled_to_mcontext_between(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_REG_CXT != DRREG_TEST_34_C)
print("ERROR: spilled register value was not preserved in test #34!\n");
} else if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #35!\n");
}
SIGLONGJMP(mark, 1);
}
static LONG WINAPI
handle_exception_nested_gpr_aflags_spill_insertion_outer(struct _EXCEPTION_POINTERS *ep)
{
if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
if (ep->ContextRecord->TEST_REG_CXT != DRREG_TEST_36_C)
print("ERROR: spilled register value was not preserved in test #36!\n");
} else if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
if (!TESTALL(DRREG_TEST_AFLAGS_C, ep->ContextRecord->CXT_XFLAGS))
print("ERROR: spilled flags value was not preserved in test #37!\n");
}
SIGLONGJMP(mark, 1);
}
# endif
int
main(int argc, const char *argv[])
{
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_test_asm, false);
intercept_signal(SIGILL, (handler_3_t)&handle_signal_test_asm, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_test_asm);
# endif
print("drreg-test running\n");
if (SIGSETJMP(mark) == 0) {
test_asm();
}
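    /* Each test below follows the same pattern: install a handler that knows
     * which register or aflags value to expect, run an assembly routine that
     * the drreg-test client instruments and that then faults, and let the
     * handler check the restored value in the fault context before
     * SIGLONGJMP-ing back so the next test can run.
     */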
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_gpr_aflags_in_slot, false);
intercept_signal(SIGILL, (handler_3_t)&handle_signal_gpr_aflags_in_slot, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_gpr_aflags_in_slot);
# endif
/* Test fault reg restore */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_gpr();
}
/* Test fault aflags restore */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_aflags_in_slot();
}
# if defined(UNIX)
intercept_signal(SIGILL, (handler_3_t)&handle_signal_ignore_3rd_slot, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_ignore_3rd_slot);
# endif
/* Test fault check ignore 3rd DR TLS slot */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_ignore_3rd_dr_tls_slot();
}
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_non_public_slot, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_non_public_slot);
# endif
/* Test fault restore of non-public DR slot used by mangling.
* Making sure drreg ignores restoring this slot.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_non_public_dr_slot();
}
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_non_public_slot_rip_rel, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_non_public_slot_rip_rel);
# endif
/* Test 10: test fault restore of non-public DR slot used by mangling,
* when rip-rel address is forced to be in register. Making sure drreg
* ignores restoring this slot. Exposes transparency limitation of DR
* if reg is optimized to be app's dead reg.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_non_public_dr_slot_rip_rel_addr_in_reg();
}
# if defined(UNIX)
intercept_signal(SIGILL, (handler_3_t)&handle_signal_multi_phase_gpr, false);
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_multi_phase_gpr, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_multi_phase_gpr);
# endif
    /* Test restore on fault for gpr reserved in multiple phases, with
* nested spill regions, and the app2app phase spill being the outer one.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_multi_phase_gpr_nested_spill_regions();
}
/* Test fault reg restore for multi-phase non-nested overlapping reservations. */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_multi_phase_gpr_overlapping_spill_regions();
}
# if defined(UNIX)
intercept_signal(SIGILL, (handler_3_t)&handle_signal_aflags_xax_gpr_read, false);
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_aflags_xax_gpr_read, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_aflags_xax_gpr_read);
# endif
/* Test fault aflags restore from xax. */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_aflags_in_xax();
}
/* Test fault gpr restore on fault when it has been restored before for an
* app read.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_gpr_restored_for_read();
}
# if defined(UNIX)
intercept_signal(SIGILL, (handler_3_t)&handle_signal_gpr_xl8_faux_gpr_spill, false);
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_gpr_xl8_faux_gpr_spill, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_gpr_xl8_faux_gpr_spill);
# endif
/* Test fault reg restore for fragments emitting DR_EMIT_STORE_TRANSLATIONS */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_gpr_store_xl8();
}
/* Test fault reg restore for fragments with a faux spill instr. */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_faux_gpr_spill();
}
# if defined(UNIX)
intercept_signal(SIGILL, (handler_3_t)&handle_signal_gpr_multi_spill_aflags_nested, false);
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_gpr_multi_spill_aflags_nested, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_gpr_multi_spill_aflags_nested);
# endif
/* Test fault reg restore for multi-phase nested reservation where
* the first phase doesn't write the reg before the second
* reservation.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_multi_phase_native_gpr_spilled_twice();
}
/* XXX i#4849: For some aflags restore tests below we do not use SIGILL to
* raise the fault. This is because the undefined instr on AArchXX is assumed
* to read aflags, and therefore restores aflags automatically. So the
* restore logic doesn't come into play.
*/
/* Test restore on fault for aflags reserved in multiple phases, with
* nested spill regions, and the app2app phase spill being the outer one.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_multi_phase_aflags_nested_spill_regions();
}
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_multi_phase_aflags_overlapping, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_multi_phase_aflags_overlapping);
# endif
/* Test restore on fault for aflags reserved in multiple phases
* with overlapping but not nested spill regions.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_multi_phase_aflags_overlapping_spill_regions();
}
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_aflags_read, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_aflags_read);
# endif
/* Test restore on fault for aflags restored once (for app read)
* before crash.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_aflags_restored_for_read();
}
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_aflags_multi_spill, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_aflags_multi_spill);
# endif
/* Test restore on fault for aflags when native aflags are spilled
* to multiple slots initially.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_multi_phase_native_aflags_spilled_twice();
}
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_aflags_xl8, false);
intercept_signal(SIGILL, (handler_3_t)&handle_signal_aflags_xl8, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_aflags_xl8);
# endif
/* Test restore on fault for aflags spilled to slot for fragment
* emitting DR_EMIT_STORE_TRANSLATIONS.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_aflags_in_slot_store_xl8();
}
/* Test restore on fault for aflags spilled to xax for fragment
* emitting DR_EMIT_STORE_TRANSLATIONS.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_aflags_in_xax_store_xl8();
}
# if defined(UNIX)
intercept_signal(SIGILL, (handler_3_t)&handle_signal_aflags_xax_already_spilled, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_aflags_xax_already_spilled);
# endif
/* Test restore on fault for aflags stored in slot, when xax was
* already spilled and in-use by instrumentation. This is to
* verify that aflags are spilled using xax only.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_aflags_xax_already_spilled();
}
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_spilled_to_mcontext_later, false);
intercept_signal(SIGILL, (handler_3_t)&handle_signal_spilled_to_mcontext_later, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_spilled_to_mcontext_later);
# endif
/* Test restore on fault for gpr spilled to mcontext later by non-drreg routines. */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_gpr_spilled_to_mcontext_later();
}
/* Test restore on fault for aflags spilled to mcontext later by non-drreg routines. */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_aflags_spilled_to_mcontext_later();
}
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_spilled_during_clean_call_later, false);
intercept_signal(SIGILL, (handler_3_t)&handle_signal_spilled_during_clean_call_later, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_spilled_during_clean_call_later);
# endif
/* Test restore on fault for gpr spilled during clean call instrumentation later. */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_gpr_spilled_during_clean_call_later();
}
/* Test restore on fault for aflags spilled during clean call instrumentation later. */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_aflags_spilled_during_clean_call_later();
}
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_spilled_to_mcontext_between, false);
intercept_signal(SIGILL, (handler_3_t)&handle_signal_spilled_to_mcontext_between, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_spilled_to_mcontext_between);
# endif
/* Test restore on fault for gpr spilled to mcontext in between its drreg spill region. */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_gpr_spilled_to_mcontext_between();
}
/* Test restore on fault for aflags spilled to mcontext in between its drreg spill region. */
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_aflags_spilled_to_mcontext_between();
}
# if defined(UNIX)
intercept_signal(SIGSEGV, (handler_3_t)&handle_signal_nested_gpr_aflags_spill_insertion_outer, false);
intercept_signal(SIGILL, (handler_3_t)&handle_signal_nested_gpr_aflags_spill_insertion_outer, false);
# elif defined(WINDOWS)
SetUnhandledExceptionFilter(&handle_exception_nested_gpr_aflags_spill_insertion_outer);
# endif
/* Test restore on fault for gpr reserved in multiple phases, with
* nested spill regions, and the insertion phase spill being the outer one.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_multi_phase_gpr_nested_spill_regions_insertion_outer();
}
/* Test restore on fault for aflags reserved in multiple phases, with
* nested spill regions, and the insertion phase spill being the outer one.
*/
if (SIGSETJMP(mark) == 0) {
test_asm_fault_restore_multi_phase_aflags_nested_spill_regions_insertion_outer();
}
/* XXX i#511: add more fault tests and other tricky corner cases */
print("drreg-test finished\n");
return 0;
}
#else /* asm code *************************************************************/
# include "asm_defines.asm"
# include "drreg-test-shared.h"
START_FILE
#ifdef X64
# define FRAME_PADDING 0
#else
# define FRAME_PADDING 0
#endif
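/* Note: every test below starts by moving its DRREG_TEST_n_ASM constant into
 * TEST_REG_ASM twice in a row. The duplicated mov appears to be the marker the
 * drreg-test client uses to recognize the start of a test region and decide
 * what instrumentation to insert; this is inferred from the pattern, not from
 * the client code itself.
 */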
#define FUNCNAME test_asm
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test1
/* Test 1: separate write and read of reserved reg */
test1:
mov TEST_REG_ASM, DRREG_TEST_1_ASM
mov TEST_REG_ASM, DRREG_TEST_1_ASM
mov TEST_REG_ASM, REG_XSP
mov REG_XBX, PTRSZ [TEST_REG_ASM]
jmp test2_init
test2_init:
/* Initializing register for additional test on top of this one, see
* instru2instru.
*/
mov TEST_REG2_ASM, MAKE_HEX_ASM(0)
jmp test2
test2:
/* Test 2: same instr writes and reads reserved reg */
mov TEST_REG_ASM, DRREG_TEST_2_ASM
mov TEST_REG_ASM, DRREG_TEST_2_ASM
mov TEST_REG_ASM, REG_XSP
mov PTRSZ [TEST_REG_ASM - 8], TEST_REG_ASM
mov TEST_REG_ASM, PTRSZ [TEST_REG_ASM - 8]
/* Test accessing the reg again to ensure the app spill slot and tool value
* are handled in the proper order:
*/
mov TEST_REG_ASM, PTRSZ [TEST_REG_ASM]
jmp test4
/* Test 4: read and write of reserved aflags */
test4:
mov TEST_REG_ASM, DRREG_TEST_4_ASM
mov TEST_REG_ASM, DRREG_TEST_4_ASM
setne TEST_REG_ASM_LSB
cmp TEST_REG_ASM, REG_XSP
jmp test11
/* Store aflags to dead XAX, and restore when XAX is live */
test11:
mov TEST_REG_ASM, DRREG_TEST_11_ASM
mov TEST_REG_ASM, DRREG_TEST_11_ASM
cmp TEST_REG_ASM, TEST_REG_ASM
push TEST_11_CONST
pop REG_XAX
mov REG_XAX, TEST_REG_ASM
mov TEST_REG_ASM, REG_XAX
je test11_done
/* Null deref if we have incorrect eflags */
xor TEST_REG_ASM, TEST_REG_ASM
mov PTRSZ [TEST_REG_ASM], TEST_REG_ASM
jmp test11_done
test11_done:
jmp test12
/* Test 12: drreg_statelessly_restore_app_value */
test12:
mov TEST_REG_ASM, DRREG_TEST_12_ASM
mov TEST_REG_ASM, DRREG_TEST_12_ASM
mov REG_XAX, TEST_12_CONST
cmp REG_XAX, TEST_12_CONST
je test12_done
/* Null deref if we have incorrect eflags */
xor TEST_REG_ASM, TEST_REG_ASM
mov PTRSZ [TEST_REG_ASM], TEST_REG_ASM
jmp test12_done
test12_done:
jmp test13
/* Test 13: Multi-phase reg spill slot conflicts. */
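/* A rough, assumed sketch of the client-side calls behind the markers below,
 * using the public drreg API (the real code lives in the drreg-test client):
 *   app2app phase:    drreg_reserve_register(drcontext, bb, where, NULL, &reg);
 *   insertion phase:  drreg_reserve_register(drcontext, bb, where, NULL, &reg);
 *   insertion phase:  drreg_unreserve_register(drcontext, bb, where, reg);
 *   app2app phase:    drreg_unreserve_register(drcontext, bb, where, reg);
 * The conflict being tested is that both phases need a spill slot for the same
 * register at overlapping points.
 */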
test13:
mov TEST_REG_ASM, DRREG_TEST_13_ASM
mov TEST_REG_ASM, DRREG_TEST_13_ASM
/* app2app phase will reserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
/* insertion phase will reserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* insertion phase will unreserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* app2app phase will unreserve TEST_REG_ASM here. */
jmp test13_done
test13_done:
/* Fail if reg was not restored correctly. */
cmp TEST_REG_ASM, DRREG_TEST_13_ASM
je test22
ud2
/* Test 22: Multi-phase aflags spill slot conflicts. */
test22:
mov TEST_REG_ASM, DRREG_TEST_22_ASM
mov TEST_REG_ASM, DRREG_TEST_22_ASM
/* Set overflow bit. */
mov al, 100
add al, 100
/* Set other aflags. */
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
/* app2app phase will reserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
/* insertion phase will reserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* insertion phase will unreserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* app2app phase will unreserve aflags here. */
jmp test22_done
test22_done:
/* Fail if aflags were not restored correctly. */
lahf
seto al
cmp ah, DRREG_TEST_AFLAGS_ASM
jne test22_fail
cmp al, 1
jne test22_fail
jmp test28
test22_fail:
ud2
/* Unreachable, but we want this bb to end here. */
jmp test28
/* Test 28: Aflags spilled to xax, and xax statelessly restored. */
test28:
mov TEST_REG_ASM, DRREG_TEST_28_ASM
mov TEST_REG_ASM, DRREG_TEST_28_ASM
/* Set overflow bit. */
mov al, 100
add al, 100
/* Set other aflags. */
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
/* aflags reserved here; spilled to xax. */
mov TEST_REG2_ASM, 1
/* xax statelessly restored here. */
mov TEST_REG2_ASM, 2
/* xax is dead, so initial aflags spill should not use slot. */
mov REG_XAX, 0
jmp test28_done
test28_done:
/* Fail if aflags were not restored correctly. */
lahf
seto al
cmp ah, DRREG_TEST_AFLAGS_ASM
jne test28_fail
cmp al, 1
jne test28_fail
jmp test38
test28_fail:
ud2
/* Unreachable, but we want this bb to end here. */
jmp test38
/* Test 38: Tests that the insertion phase slot contains the
* correct app value when there's overlapping spill regions for
* some reg due to multi-phase drreg use in app2app and insertion
* phases. The insertion phase should update the reg value in its own
* slot by re-spilling it after an app2app instruction that restored
* the app value for an app read.
*/
test38:
mov TEST_REG_ASM, DRREG_TEST_38_ASM
mov TEST_REG_ASM, DRREG_TEST_38_ASM
/* app2app phase reserves TEST_REG_ASM here. */
/* app2app phase writes TEST_REG_ASM here. */
/* insertion phase reserves TEST_REG_ASM here,
* storing the app2app value in its slot.
*/
/* insertion phase writes TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
/* app2app unreserves TEST_REG_ASM here.
* Seeing this app2app write, insertion phase automatically
* re-spills TEST_REG_ASM to its slot.
*/
/* insertion phase automatically restores TEST_REG_ASM
* here, for the app read below.
*/
mov TEST_REG2_ASM, TEST_REG_ASM
cmp TEST_REG2_ASM, DRREG_TEST_38_ASM
jne test38_fail
test38_done:
jmp epilog
test38_fail:
ud2
/* Unreachable, but we want this bb to end here. */
jmp epilog
epilog:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test1
/* Test 1: separate write and read of reserved reg */
test1:
movw TEST_REG_ASM, DRREG_TEST_1_ASM
movw TEST_REG_ASM, DRREG_TEST_1_ASM
mov TEST_REG_ASM, sp
ldr r0, PTRSZ [TEST_REG_ASM]
b test2
/* Test 2: same instr writes and reads reserved reg */
test2:
movw TEST_REG_ASM, DRREG_TEST_2_ASM
movw TEST_REG_ASM, DRREG_TEST_2_ASM
mov TEST_REG_ASM, sp
ldr TEST_REG_ASM, PTRSZ [TEST_REG_ASM]
b test4
/* Test 4: read and write of reserved aflags */
test4:
movw TEST_REG_ASM, DRREG_TEST_4_ASM
movw TEST_REG_ASM, DRREG_TEST_4_ASM
sel TEST_REG_ASM, r0, r0
cmp TEST_REG_ASM, sp
b test13
/* Test 13: Multi-phase reg spill slot conflicts. */
test13:
movw TEST_REG_ASM, DRREG_TEST_13_ASM
movw TEST_REG_ASM, DRREG_TEST_13_ASM
/* app2app phase will reserve TEST_REG_ASM here. */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
/* insertion phase will reserve TEST_REG_ASM here. */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* insertion phase will unreserve TEST_REG_ASM here. */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* app2app phase will unreserve TEST_REG_ASM here. */
b test13_done
test13_done:
/* Fail if reg was not restored correctly. */
movw TEST_REG2_ASM, DRREG_TEST_13_ASM
cmp TEST_REG_ASM, TEST_REG2_ASM
beq test22
.word 0xe7f000f0 /* udf */
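/* Test 22: Multi-phase aflags spill slot conflicts. */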
test22:
movw TEST_REG_ASM, DRREG_TEST_22_ASM
movw TEST_REG_ASM, DRREG_TEST_22_ASM
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
/* app2app phase will reserve aflags here. */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
/* insertion phase will reserve aflags here. */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* insertion phase will unreserve aflags here. */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* app2app phase will unreserve aflags here. */
b test22_done
test22_done:
/* Fail if aflags were not restored correctly. */
mrs TEST_REG_ASM, APSR
cmp TEST_REG_ASM, DRREG_TEST_AFLAGS_ASM
beq epilog
.word 0xe7f000f0 /* udf */
epilog:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test1
/* Test 1: separate write and read of reserved reg */
test1:
movz TEST_REG_ASM, DRREG_TEST_1_ASM
movz TEST_REG_ASM, DRREG_TEST_1_ASM
mov TEST_REG_ASM, sp
ldr x0, PTRSZ [TEST_REG_ASM]
b test2
/* Test 2: same instr writes and reads reserved reg */
test2:
movz TEST_REG_ASM, DRREG_TEST_2_ASM
movz TEST_REG_ASM, DRREG_TEST_2_ASM
mov TEST_REG_ASM, sp
ldr TEST_REG_ASM, PTRSZ [TEST_REG_ASM]
b test4
/* Test 4: read and write of reserved aflags */
test4:
movz TEST_REG_ASM, DRREG_TEST_4_ASM
movz TEST_REG_ASM, DRREG_TEST_4_ASM
csel TEST_REG_ASM, x0, x0, gt
cmp TEST_REG_ASM, x0
b test13
/* Test 13: Multi-phase reg spill slot conflicts. */
test13:
movz TEST_REG_ASM, DRREG_TEST_13_ASM
movz TEST_REG_ASM, DRREG_TEST_13_ASM
/* app2app phase will reserve TEST_REG_ASM here. */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
/* insertion phase will reserve TEST_REG_ASM here. */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* insertion phase will unreserve TEST_REG_ASM here. */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* app2app phase will unreserve TEST_REG_ASM here. */
b test13_done
test13_done:
/* Fail if reg was not restored correctly. */
movz TEST_REG2_ASM, DRREG_TEST_13_ASM
cmp TEST_REG_ASM, TEST_REG2_ASM
beq test22
.inst 0xf36d19 /* udf */
/* Test 22: Multi-phase aflags spill slot conflicts. */
test22:
movz TEST_REG_ASM, DRREG_TEST_22_ASM
movz TEST_REG_ASM, DRREG_TEST_22_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
/* app2app phase will reserve aflags here. */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
/* insertion phase will reserve aflags here. */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* insertion phase will unreserve aflags here. */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* app2app phase will unreserve aflags here. */
b test22_done
test22_done:
/* Fail if aflags were not restored correctly. */
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
mrs TEST_REG_ASM, nzcv
cmp TEST_REG2_ASM, TEST_REG_ASM
beq epilog
.inst 0xf36d19 /* udf */
epilog:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
#define FUNCNAME test_asm_fault_restore_gpr
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test3
/* Test 3: fault reg restore */
test3:
mov TEST_REG_ASM, DRREG_TEST_3_ASM
mov TEST_REG_ASM, DRREG_TEST_3_ASM
nop
ud2
jmp epilog2
epilog2:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test3
/* Test 3: fault reg restore */
test3:
movw TEST_REG_ASM, DRREG_TEST_3_ASM
movw TEST_REG_ASM, DRREG_TEST_3_ASM
nop
.word 0xe7f000f0 /* udf */
b epilog2
epilog2:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test3
/* Test 3: fault reg restore */
test3:
movz TEST_REG_ASM, DRREG_TEST_3_ASM
movz TEST_REG_ASM, DRREG_TEST_3_ASM
nop
.inst 0xf36d19 /* udf */
b epilog2
epilog2:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
#define FUNCNAME test_asm_fault_restore_aflags_in_slot
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test5
/* Test 5: fault aflags restore */
test5:
mov TEST_REG_ASM, DRREG_TEST_5_ASM
mov TEST_REG_ASM, DRREG_TEST_5_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
nop
mov REG_XAX, 0
mov REG_XAX, PTRSZ [REG_XAX] /* crash */
jmp epilog3
epilog3:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test5
/* Test 5: fault aflags restore */
test5:
movw TEST_REG_ASM, DRREG_TEST_5_ASM
movw TEST_REG_ASM, DRREG_TEST_5_ASM
/* XXX: also test GE flags */
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
nop
mov r0, HEX(0)
ldr r0, PTRSZ [r0] /* crash */
b epilog3
epilog3:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test5
/* Test 5: fault aflags restore */
test5:
movz TEST_REG_ASM, DRREG_TEST_5_ASM
movz TEST_REG_ASM, DRREG_TEST_5_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
nop
mov x0, HEX(0)
ldr x0, PTRSZ [x0] /* crash */
b epilog3
epilog3:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
#define FUNCNAME test_asm_fault_restore_ignore_3rd_dr_tls_slot
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test6
/* Test 6: fault check ignore 3rd DR TLS slot */
test6:
mov TEST_REG_ASM, DRREG_TEST_6_ASM
mov TEST_REG_ASM, DRREG_TEST_6_ASM
nop
mov TEST_REG_ASM, DRREG_TEST_7_ASM
nop
ud2
jmp epilog6
epilog6:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
/* Test 6: doesn't exist for ARM */
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
/* Test 6: doesn't exist for AARCH64 */
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
#define FUNCNAME test_asm_fault_restore_non_public_dr_slot
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
#ifdef X64
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
/* XXX i#3312: Temporarily disable test until bug has been fixed. */
#if 0
jmp test8
/* Test 8: test fault restore of non-public DR slot used by mangling.
* Making sure drreg ignores restoring this slot.
*/
test8:
mov PTRSZ [REG_XSP], REG_XAX
sub REG_XSP, 8
mov TEST_REG_ASM, DRREG_TEST_8_ASM
mov TEST_REG_ASM, DRREG_TEST_8_ASM
nop
mov REG_XAX, DRREG_TEST_9_ASM
/* The address will get mangled into register REG_XAX. */
add TEST_REG_ASM, PTRSZ SYMREF(-0x7fffffff) /* crash */
jmp epilog8
epilog8:
add REG_XSP, 8
mov REG_XAX, PTRSZ [REG_XSP]
#endif
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
#endif
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
/* Test 8: not implemented for ARM */
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
/* Test 8: not implemented for AARCH64 */
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
#define FUNCNAME test_asm_fault_restore_non_public_dr_slot_rip_rel_addr_in_reg
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
#ifdef X64
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
/* XXX i#3312: Temporarily disable test until bug has been fixed. */
#if 0
jmp test10
/* Test 10: test fault restore of non-public DR slot used by mangling,
* when rip-rel address is forced to be in register. Making sure drreg ignores
* restoring this slot.
*/
test10:
mov PTRSZ [REG_XSP], REG_XAX
sub REG_XSP, 8
mov TEST_REG_ASM, DRREG_TEST_10_ASM
mov TEST_REG_ASM, DRREG_TEST_10_ASM
nop
mov REG_XAX, DRREG_TEST_11_ASM
/* The address will get mangled into register REG_XAX. */
add REG_XAX, PTRSZ SYMREF(-0x7fffffff) /* crash */
jmp epilog10
epilog10:
add REG_XSP, 8
mov REG_XAX, PTRSZ [REG_XSP]
#endif
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
#endif
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
/* Test 10: not implemented for ARM */
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
/* Test 10: not implemented for AARCH64 */
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 14: restore on fault for gpr reserved in multiple phases,
* where the two spill regions are nested. In this case, the reg
* will be restored from the spill slot used by the first (app2app)
* phase.
*/
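/* Timeline implied by the markers below: the app2app reservation opens first
 * and closes last, so at the fault the original app value is expected in the
 * app2app-phase slot (the inner insertion-phase slot presumably holds whatever
 * the app2app instrumentation left in the register).
 */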
#define FUNCNAME test_asm_fault_restore_multi_phase_gpr_nested_spill_regions
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test14
test14:
mov TEST_REG_ASM, DRREG_TEST_14_ASM
mov TEST_REG_ASM, DRREG_TEST_14_ASM
/* app2app phase will reserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
ud2
/* insertion phase will reserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* insertion phase will unreserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* app2app phase will unreserve TEST_REG_ASM here. */
jmp epilog14
epilog14:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test14
test14:
movw TEST_REG_ASM, DRREG_TEST_14_ASM
movw TEST_REG_ASM, DRREG_TEST_14_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.word 0xe7f000f0 /* udf */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog14
epilog14:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test14
test14:
movz TEST_REG_ASM, DRREG_TEST_14_ASM
movz TEST_REG_ASM, DRREG_TEST_14_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.inst 0xf36d19 /* udf */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog14
epilog14:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 15: restore on fault for aflags stored in xax without preceding
* xax spill.
*/
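/* On x86, drreg materializes aflags via lahf/seto into xax, so when xax is
 * dead (as arranged below) the flags can stay in xax without ever being
 * written to a spill slot; the restore-on-fault logic must recognize that
 * case and recover the flags from xax in the fault context.
 */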
#define FUNCNAME test_asm_fault_restore_aflags_in_xax
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test15
test15:
mov TEST_REG_ASM, DRREG_TEST_15_ASM
mov TEST_REG_ASM, DRREG_TEST_15_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
nop
ud2
/* xax is dead, so should not need to spill aflags to slot. */
mov REG_XAX, 0
jmp epilog15
epilog15:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
/* This test does not have AArchXX variants. */
#elif defined(ARM)
bx lr
#elif defined(AARCH64)
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 16: restore on fault for reg restored once (for app read)
* before crash. This is to verify that the drreg state restoration
* logic doesn't forget a spill slot after it sees one restore (like
 * for an app read instr).
*/
#define FUNCNAME test_asm_fault_restore_gpr_restored_for_read
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test16
test16:
mov TEST_REG_ASM, DRREG_TEST_16_ASM
mov TEST_REG_ASM, DRREG_TEST_16_ASM
nop
mov REG_XCX, 0
mov REG_XCX, PTRSZ [REG_XCX] /* crash */
/* Read reg so that it is restored once. */
add TEST_REG2_ASM, TEST_REG_ASM
jmp epilog16
epilog16:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test16
test16:
movw TEST_REG_ASM, DRREG_TEST_16_ASM
movw TEST_REG_ASM, DRREG_TEST_16_ASM
nop
mov r0, HEX(0)
ldr r0, PTRSZ [r0] /* crash */
/* Read reg so that it is restored once. */
add TEST_REG2_ASM, TEST_REG_ASM, TEST_REG_ASM
b epilog16
epilog16:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test16
test16:
movz TEST_REG_ASM, DRREG_TEST_16_ASM
movz TEST_REG_ASM, DRREG_TEST_16_ASM
nop
mov x0, HEX(0)
ldr x0, PTRSZ [x0] /* crash */
/* Read reg so that it is restored once. */
add TEST_REG2_ASM, TEST_REG_ASM, TEST_REG_ASM
b epilog16
epilog16:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 17: restore on fault for gpr reserved in multiple phases
* with overlapping but not nested spill regions. In this case,
* the app value changes slots, from the one used in app2app
* phase, to the one used in insertion phase.
*/
#define FUNCNAME test_asm_fault_restore_multi_phase_gpr_overlapping_spill_regions
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test17
test17:
mov TEST_REG_ASM, DRREG_TEST_17_ASM
mov TEST_REG_ASM, DRREG_TEST_17_ASM
/* app2app phase will reserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov REG_XCX, 0
mov REG_XCX, PTRSZ [REG_XCX] /* crash */
/* insertion phase will reserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* app2app phase will release TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* insertion phase will release TEST_REG_ASM here. */
jmp epilog17
epilog17:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test17
test17:
movw TEST_REG_ASM, DRREG_TEST_17_ASM
movw TEST_REG_ASM, DRREG_TEST_17_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov r0, HEX(0)
ldr r0, PTRSZ [r0] /* crash */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog17
epilog17:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test17
test17:
movz TEST_REG_ASM, DRREG_TEST_17_ASM
movz TEST_REG_ASM, DRREG_TEST_17_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov x0, HEX(0)
ldr x0, PTRSZ [x0] /* crash */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog17
epilog17:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 18: fault reg restore for fragments with DR_EMIT_STORE_TRANSLATIONS */
#define FUNCNAME test_asm_fault_restore_gpr_store_xl8
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test18
test18:
mov TEST_REG_ASM, DRREG_TEST_18_ASM
mov TEST_REG_ASM, DRREG_TEST_18_ASM
nop
ud2
jmp epilog18
epilog18:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test18
test18:
movw TEST_REG_ASM, DRREG_TEST_18_ASM
movw TEST_REG_ASM, DRREG_TEST_18_ASM
nop
.word 0xe7f000f0 /* udf */
b epilog18
epilog18:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test18
test18:
movz TEST_REG_ASM, DRREG_TEST_18_ASM
movz TEST_REG_ASM, DRREG_TEST_18_ASM
nop
.inst 0xf36d19 /* udf */
b epilog18
epilog18:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 19: Test fault reg restore for fragments with a faux spill
* instr -- an app instr that looks like a drreg spill instr, which
* may corrupt drreg state restoration. This cannot happen on x86 as
* an app instr that uses the %gs register will be mangled into a
* non-far memref.
*/
#define FUNCNAME test_asm_fault_restore_faux_gpr_spill
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
END_PROLOG
ret
#elif defined(ARM)
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test19
test19:
movz TEST_REG_ASM, DRREG_TEST_19_ASM
movz TEST_REG_ASM, DRREG_TEST_19_ASM
/* TEST_REG_ASM is reserved here. */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov x0, HEX(0)
ldr x0, PTRSZ [x0] /* crash */
/* TEST_REG_ASM is un-reserved here. */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* Read TEST_REG_ASM just so that it isn't dead. */
add TEST_REG_ASM, TEST_REG_ASM, TEST_REG_ASM
adr TEST_REG_STOLEN_ASM, some_data
/* A faux restore instr -- looks like a drreg restore but isn't.
* It will prevent us from recognising the actual spill slot for
* TEST_REG_ASM.
*/
ldr TEST_REG_ASM, PTRSZ [TEST_REG_STOLEN_ASM, #TEST_FAUX_SPILL_TLS_OFFS]
b epilog19
epilog19:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 20: Test restore on fault for gpr reserved in multiple
* phases, where the two spill regions are nested, and the first
* phase doesn't write the reg before the second reservation. This
* is to verify that drreg state restoration logic remembers that
* the app value can be found in both the spill slots.
*/
#define FUNCNAME test_asm_fault_restore_multi_phase_native_gpr_spilled_twice
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test20
test20:
mov TEST_REG_ASM, DRREG_TEST_20_ASM
mov TEST_REG_ASM, DRREG_TEST_20_ASM
/* - app2app reserves TEST_REG_ASM here, but doesn't write it.
* - insertion reserves TEST_REG_ASM here, which may confuse the
   * state restoration logic into overwriting the spill slot for
* TEST_REG_ASM as the new slot also has its native value.
*/
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
ud2
/* - insertion phase unreserves TEST_REG_ASM and frees the spill
* slot.
*/
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* - insertion phase reserves TEST_REG2_ASM which would use the
* same spill slot as freed above, and overwrite TEST_REG_ASM
* value stored there currently. After this TEST_REG_ASM can
* only be found in its app2app spill slot.
* - insertion phase writes to TEST_REG_ASM so that we need to
* restore it.
*/
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* app2app phase unreserves TEST_REG_ASM. */
jmp epilog20
epilog20:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test20
test20:
movw TEST_REG_ASM, DRREG_TEST_20_ASM
movw TEST_REG_ASM, DRREG_TEST_20_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.word 0xe7f000f0 /* udf */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog20
epilog20:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test20
test20:
movz TEST_REG_ASM, DRREG_TEST_20_ASM
movz TEST_REG_ASM, DRREG_TEST_20_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.inst 0xf36d19 /* udf */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog20
epilog20:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 21: restore on fault for aflags reserved in multiple phases
* with nested spill regions.
*/
#define FUNCNAME test_asm_fault_restore_multi_phase_aflags_nested_spill_regions
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test21
test21:
mov TEST_REG_ASM, DRREG_TEST_21_ASM
mov TEST_REG_ASM, DRREG_TEST_21_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
/* app2app phase will reserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov REG_XAX, 0
mov REG_XAX, PTRSZ [REG_XAX] /* crash */
/* insertion phase will reserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* insertion phase will unreserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* app2app phase will unreserve aflags here. */
jmp epilog21
epilog21:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test21
test21:
movw TEST_REG_ASM, DRREG_TEST_21_ASM
movw TEST_REG_ASM, DRREG_TEST_21_ASM
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov r0, HEX(0)
ldr r0, PTRSZ [r0] /* crash */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog21
epilog21:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test21
test21:
movz TEST_REG_ASM, DRREG_TEST_21_ASM
movz TEST_REG_ASM, DRREG_TEST_21_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov x0, HEX(0)
ldr x0, PTRSZ [x0] /* crash */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog21
epilog21:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 23: restore on fault for aflags reserved in multiple phases
* with overlapping but not nested spill regions. In this case,
* the native aflags are stored in the app2app slot initially. Then,
* they are swapped to the insertion phase slot after the app2app
* unreservation.
* Note that we do not respill aflags to the same slot, but select
* a new slot at each re-spill, so the app2app phase slot gets
 * recycled and used by the insertion phase to re-spill the app
* aflags.
*/
#define FUNCNAME test_asm_fault_restore_multi_phase_aflags_overlapping_spill_regions
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test23
test23:
mov TEST_REG_ASM, DRREG_TEST_23_ASM
mov TEST_REG_ASM, DRREG_TEST_23_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
/* app2app phase will reserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov REG_XAX, 0
mov REG_XAX, PTRSZ [REG_XAX] /* crash */
/* insertion phase will reserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* app2app phase will release aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* insertion phase will release aflags here. */
jmp epilog23
epilog23:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test23
test23:
movw TEST_REG_ASM, DRREG_TEST_23_ASM
movw TEST_REG_ASM, DRREG_TEST_23_ASM
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov r0, HEX(0)
ldr r0, PTRSZ [r0] /* crash */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog23
epilog23:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test23
test23:
movz TEST_REG_ASM, DRREG_TEST_23_ASM
movz TEST_REG_ASM, DRREG_TEST_23_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov x0, HEX(0)
ldr x0, PTRSZ [x0] /* crash */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog23
epilog23:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 24: restore on fault for aflags restored once (for app read)
* before crash. This is to verify that the drreg state restoration
* logic doesn't forget a spill slot after it sees one restore (like
 * for an app read instr).
*/
#define FUNCNAME test_asm_fault_restore_aflags_restored_for_read
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test24
test24:
mov TEST_REG_ASM, DRREG_TEST_24_ASM
mov TEST_REG_ASM, DRREG_TEST_24_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov REG_XAX, 0
mov REG_XAX, PTRSZ [REG_XAX] /* crash */
/* Read aflags so that it is restored once. */
seto al
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
jmp epilog24
epilog24:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test24
test24:
movw TEST_REG_ASM, DRREG_TEST_24_ASM
movw TEST_REG_ASM, DRREG_TEST_24_ASM
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov r0, HEX(0)
ldr r0, PTRSZ [r0] /* crash */
/* Read aflags so that it is restored once. */
mrs TEST_REG2_ASM, APSR
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog24
epilog24:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test24
test24:
movz TEST_REG_ASM, DRREG_TEST_24_ASM
movz TEST_REG_ASM, DRREG_TEST_24_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov x0, HEX(0)
ldr x0, PTRSZ [x0] /* crash */
/* Read aflags so that it is restored once. */
mrs TEST_REG2_ASM, nzcv
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog24
epilog24:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 25: Test restore on fault for aflags reserved in multiple
* phases, where the two spill regions are nested, and the first
* phase doesn't write the aflags before the second reservation. This
* is to verify that drreg state restoration logic remembers that
* the app value can be found in both the spill slots.
*/
#define FUNCNAME test_asm_fault_restore_multi_phase_native_aflags_spilled_twice
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test25
test25:
mov TEST_REG_ASM, DRREG_TEST_25_ASM
mov TEST_REG_ASM, DRREG_TEST_25_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
/* - app2app reserves aflags here, but doesn't write it.
* - insertion reserves aflags here, which may confuse the
   * state restoration logic into overwriting the spill slot for
* aflags as the new slot also has its native value.
*/
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov REG_XAX, 0
mov REG_XAX, PTRSZ [REG_XAX] /* crash */
/* - insertion phase unreserves aflags and frees the spill
* slot.
*/
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* - insertion phase reserves TEST_REG_ASM which would use the
* same spill slot as freed above, and overwrite the aflags
* value stored there currently. After this native aflags can
* only be found in its app2app spill slot.
* - insertion phase writes to aflags so that we need to
* restore it.
*/
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* app2app phase unreserves aflags. */
jmp epilog25
epilog25:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test25
test25:
movw TEST_REG_ASM, DRREG_TEST_25_ASM
movw TEST_REG_ASM, DRREG_TEST_25_ASM
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov r0, HEX(0)
ldr r0, PTRSZ [r0] /* crash */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog25
epilog25:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test25
test25:
movz TEST_REG_ASM, DRREG_TEST_25_ASM
movz TEST_REG_ASM, DRREG_TEST_25_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov x0, HEX(0)
ldr x0, PTRSZ [x0] /* crash */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog25
epilog25:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 26: fault aflags restore from spill slot for fragment emitting
* DR_EMIT_STORE_TRANSLATIONS. This uses the state restoration logic
* without the faulting fragment's ilist.
*/
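/* DR_EMIT_STORE_TRANSLATIONS tells DR to store the translation info computed
 * at emit time, so the restoration exercised here runs without recreating the
 * faulting fragment's ilist; presumably the drreg-test client returns this
 * flag from its bb event for fragments tagged with this test's constant.
 */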
#define FUNCNAME test_asm_fault_restore_aflags_in_slot_store_xl8
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test26
test26:
mov TEST_REG_ASM, DRREG_TEST_26_ASM
mov TEST_REG_ASM, DRREG_TEST_26_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
nop
mov REG_XAX, 0
mov REG_XAX, PTRSZ [REG_XAX] /* crash */
jmp epilog26
epilog26:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test26
test26:
movw TEST_REG_ASM, DRREG_TEST_26_ASM
movw TEST_REG_ASM, DRREG_TEST_26_ASM
/* XXX: also test GE flags */
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
nop
mov r0, HEX(0)
ldr r0, PTRSZ [r0] /* crash */
b epilog26
epilog26:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test26
test26:
movz TEST_REG_ASM, DRREG_TEST_26_ASM
movz TEST_REG_ASM, DRREG_TEST_26_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
nop
mov x0, HEX(0)
ldr x0, PTRSZ [x0] /* crash */
b epilog26
epilog26:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 27: restore on fault for aflags stored in xax without preceding
* xax spill, for fragments emitting DR_EMIT_STORE_TRANSLATIONS. This
* uses the state restoration logic without ilist.
*/
#define FUNCNAME test_asm_fault_restore_aflags_in_xax_store_xl8
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test27
test27:
mov TEST_REG_ASM, DRREG_TEST_27_ASM
mov TEST_REG_ASM, DRREG_TEST_27_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
nop
ud2
/* xax is dead, so should not need to spill aflags to slot. */
mov REG_XAX, 0
jmp epilog27
epilog27:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
/* This test does not have AArchXX variants. */
#elif defined(ARM)
bx lr
#elif defined(AARCH64)
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 29: restore on fault for aflags stored in slot. In this test,
* when aflags are spilled, xax was already reserved and in-use. This
* is to verify that aflags are spilled using xax only.
*/
#define FUNCNAME test_asm_fault_restore_aflags_xax_already_spilled
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test29
test29:
mov TEST_REG_ASM, DRREG_TEST_29_ASM
mov TEST_REG_ASM, DRREG_TEST_29_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
/* xax is reserved here */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
/* aflags are reserved here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
ud2
jmp epilog29
epilog29:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
/* This test does not have AArchXX variants. */
#elif defined(ARM)
bx lr
#elif defined(AARCH64)
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 30: Test restoration of gpr when it was spilled to mcontext
* later by non-drreg routines. This is to verify that drreg's state
* restoration works even in presence of non-drreg spills and restores.
*/
#define FUNCNAME test_asm_fault_restore_gpr_spilled_to_mcontext_later
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test30
test30:
mov TEST_REG_ASM, DRREG_TEST_30_ASM
mov TEST_REG_ASM, DRREG_TEST_30_ASM
/* TEST_REG_ASM will be spilled using drreg here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
ud2
/* TEST_REG_ASM will be restored using drreg here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* TEST_REG_ASM will be spilled and restored from mcontext here. */
jmp epilog30
epilog30:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test30
test30:
movw TEST_REG_ASM, DRREG_TEST_30_ASM
movw TEST_REG_ASM, DRREG_TEST_30_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.word 0xe7f000f0 /* udf */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog30
epilog30:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test30
test30:
movz TEST_REG_ASM, DRREG_TEST_30_ASM
movz TEST_REG_ASM, DRREG_TEST_30_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.inst 0xf36d19 /* udf */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog30
epilog30:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 31: Test restoration of aflags when they were spilled to
* mcontext later by non-drreg routines. This is to verify that
* drreg's state restoration works even in presence of non-drreg
* spills and restores.
*/
#define FUNCNAME test_asm_fault_restore_aflags_spilled_to_mcontext_later
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test31
test31:
mov TEST_REG_ASM, DRREG_TEST_31_ASM
mov TEST_REG_ASM, DRREG_TEST_31_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
/* aflags will be spilled using drreg here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov REG_XCX, 0
mov REG_XCX, PTRSZ [REG_XCX] /* crash */
/* aflags will be restored using drreg here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* aflags will be spilled and restored from mcontext here. */
jmp epilog31
epilog31:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test31
test31:
movw TEST_REG_ASM, DRREG_TEST_31_ASM
movw TEST_REG_ASM, DRREG_TEST_31_ASM
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov r1, HEX(0)
ldr r1, PTRSZ [r1] /* crash */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog31
epilog31:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test31
test31:
movz TEST_REG_ASM, DRREG_TEST_31_ASM
movz TEST_REG_ASM, DRREG_TEST_31_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov x1, HEX(0)
ldr x1, PTRSZ [x1] /* crash */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog31
epilog31:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 32: Test restoration of mcontext reg that was reserved also
* using non-drreg routines during clean call instrumentation.
*/
#define FUNCNAME test_asm_fault_restore_gpr_spilled_during_clean_call_later
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test32
test32:
mov TEST_REG_ASM, DRREG_TEST_32_ASM
mov TEST_REG_ASM, DRREG_TEST_32_ASM
mov TEST_REG_CLEAN_CALL_MCONTEXT_ASM, DRREG_TEST_32_ASM
/* TEST_REG_CLEAN_CALL_MCONTEXT_ASM will be spilled using drreg here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
ud2
/* TEST_REG_CLEAN_CALL_MCONTEXT_ASM will be restored using drreg here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* Clean call will be added here. */
jmp epilog32
epilog32:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test32
test32:
movw TEST_REG_ASM, DRREG_TEST_32_ASM
movw TEST_REG_ASM, DRREG_TEST_32_ASM
movw TEST_REG_CLEAN_CALL_MCONTEXT_ASM, DRREG_TEST_32_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.word 0xe7f000f0 /* udf */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog32
epilog32:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test32
test32:
movz TEST_REG_ASM, DRREG_TEST_32_ASM
movz TEST_REG_ASM, DRREG_TEST_32_ASM
movz TEST_REG_CLEAN_CALL_MCONTEXT_ASM, DRREG_TEST_32_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.inst 0xf36d19 /* udf */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog32
epilog32:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 33: Test restoration for aflags reserved also during clean call
* instrumentation.
*/
#define FUNCNAME test_asm_fault_restore_aflags_spilled_during_clean_call_later
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test33
test33:
mov TEST_REG_ASM, DRREG_TEST_33_ASM
mov TEST_REG_ASM, DRREG_TEST_33_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
/* aflags will be spilled using drreg here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov REG_XCX, 0
mov REG_XCX, PTRSZ [REG_XCX] /* crash */
/* aflags will be restored using drreg here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* Clean call will be added here. */
jmp epilog33
epilog33:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test33
test33:
movw TEST_REG_ASM, DRREG_TEST_33_ASM
movw TEST_REG_ASM, DRREG_TEST_33_ASM
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov r1, HEX(0)
ldr r1, PTRSZ [r1] /* crash */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog33
epilog33:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test33
test33:
movz TEST_REG_ASM, DRREG_TEST_33_ASM
movz TEST_REG_ASM, DRREG_TEST_33_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov x1, HEX(0)
ldr x1, PTRSZ [x1] /* crash */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog33
epilog33:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 34: Test restoration of gpr when it was spilled to mcontext
* during its drreg spill region. This is to verify that drreg's
* state restoration works even in presence of non-drreg spills
* and restores.
*/
#define FUNCNAME test_asm_fault_restore_gpr_spilled_to_mcontext_between
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test34
test34:
mov TEST_REG_ASM, DRREG_TEST_34_ASM
mov TEST_REG_ASM, DRREG_TEST_34_ASM
/* TEST_REG_ASM will be spilled using drreg here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
ud2
/* TEST_REG_ASM will be spilled and restored to mcontext here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* TEST_REG_ASM will be restored using drreg here. */
jmp epilog34
epilog34:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test34
test34:
movw TEST_REG_ASM, DRREG_TEST_34_ASM
movw TEST_REG_ASM, DRREG_TEST_34_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.word 0xe7f000f0 /* udf */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog34
epilog34:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test34
test34:
movz TEST_REG_ASM, DRREG_TEST_34_ASM
movz TEST_REG_ASM, DRREG_TEST_34_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.inst 0xf36d19 /* udf */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog34
epilog34:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 35: Test restoration of aflags when they were spilled to
* mcontext during its drreg spill region by non-drreg routines.
* This is to verify that drreg's state restoration works even
* in presence of non-drreg spills and restores.
*/
#define FUNCNAME test_asm_fault_restore_aflags_spilled_to_mcontext_between
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test35
test35:
mov TEST_REG_ASM, DRREG_TEST_35_ASM
mov TEST_REG_ASM, DRREG_TEST_35_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
/* aflags will be spilled using drreg here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov REG_XCX, 0
mov REG_XCX, PTRSZ [REG_XCX] /* crash */
/* aflags will be spilled and restored to mcontext here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* aflags will be restored using drreg here. */
jmp epilog35
epilog35:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test35
test35:
movw TEST_REG_ASM, DRREG_TEST_35_ASM
movw TEST_REG_ASM, DRREG_TEST_35_ASM
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov r1, HEX(0)
ldr r1, PTRSZ [r1] /* crash */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog35
epilog35:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test35
test35:
movz TEST_REG_ASM, DRREG_TEST_35_ASM
movz TEST_REG_ASM, DRREG_TEST_35_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov x1, HEX(0)
ldr x1, PTRSZ [x1] /* crash */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
b epilog35
epilog35:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 36: restore on fault for gpr reserved in multiple phases,
* where the two spill regions are nested, and the insertion phase
* spill region is the outer one.
*/
#define FUNCNAME test_asm_fault_restore_multi_phase_gpr_nested_spill_regions_insertion_outer
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test36
test36:
mov TEST_REG_ASM, DRREG_TEST_36_ASM
mov TEST_REG_ASM, DRREG_TEST_36_ASM
/* insertion phase will reserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
ud2
/* app2app phase will reserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* app2app phase will unreserve TEST_REG_ASM here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* insertion phase will unreserve TEST_REG_ASM here. */
jmp epilog36
epilog36:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test36
test36:
movw TEST_REG_ASM, DRREG_TEST_36_ASM
movw TEST_REG_ASM, DRREG_TEST_36_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.word 0xe7f000f0 /* udf */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog36
epilog36:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test36
test36:
movz TEST_REG_ASM, DRREG_TEST_36_ASM
movz TEST_REG_ASM, DRREG_TEST_36_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
.inst 0xf36d19 /* udf */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog36
epilog36:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
/* Test 37: restore on fault for aflags reserved in multiple phases,
* where the two spill regions are nested, and the insertion phase
* spill region is the outer one.
*/
#define FUNCNAME test_asm_fault_restore_multi_phase_aflags_nested_spill_regions_insertion_outer
DECLARE_FUNC_SEH(FUNCNAME)
GLOBAL_LABEL(FUNCNAME:)
#ifdef X86
PUSH_CALLEE_SAVED_REGS()
sub REG_XSP, FRAME_PADDING /* align */
END_PROLOG
jmp test37
test37:
mov TEST_REG_ASM, DRREG_TEST_37_ASM
mov TEST_REG_ASM, DRREG_TEST_37_ASM
mov ah, DRREG_TEST_AFLAGS_ASM
sahf
/* insertion phase will reserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov REG_XAX, 0
mov REG_XAX, PTRSZ [REG_XAX] /* crash */
/* app2app phase will reserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
/* app2app phase will unreserve aflags here. */
mov TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
/* insertion phase will unreserve aflags here. */
jmp epilog37
epilog37:
add REG_XSP, FRAME_PADDING /* make a legal SEH64 epilog */
POP_CALLEE_SAVED_REGS()
ret
#elif defined(ARM)
/* XXX i#3289: prologue missing */
b test37
test37:
movw TEST_REG_ASM, DRREG_TEST_37_ASM
movw TEST_REG_ASM, DRREG_TEST_37_ASM
msr APSR_nzcvq, DRREG_TEST_AFLAGS_ASM
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov r0, HEX(0)
ldr r0, PTRSZ [r0] /* crash */
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movw TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog37
epilog37:
bx lr
#elif defined(AARCH64)
/* XXX i#3289: prologue missing */
b test37
test37:
movz TEST_REG_ASM, DRREG_TEST_37_ASM
movz TEST_REG_ASM, DRREG_TEST_37_ASM
movz TEST_REG2_ASM, DRREG_TEST_AFLAGS_H_ASM, LSL 16
msr nzcv, TEST_REG2_ASM
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_1
mov x0, HEX(0)
ldr x0, PTRSZ [x0] /* crash */
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_2
movz TEST_REG2_ASM, TEST_INSTRUMENTATION_MARKER_3
b epilog37
epilog37:
ret
#endif
END_FUNC(FUNCNAME)
#undef FUNCNAME
START_DATA
/* Should be at least (TEST_FAUX_SPILL_TLS_OFFS+1)*8 bytes.
* Cannot use the macro as the expression needs to be
* absolute.
*/
BYTES_ARR(some_data, (1000+1)*8)
END_FILE
#endif
/* clang-format on */
| 1 | 25,207 | In this test, we want xax to be dead, so that aflags are not spilled to a slot. | DynamoRIO-dynamorio | c |
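To make the remark above concrete, here is one possible rewording of the comment inside test 27, in the same asm style as the file; this is illustrative wording only, not the committed code:
nop
ud2
/* We want xax to be dead here, so that drreg does not need to spill
 * aflags to a slot around the faulting instruction.
 */
mov REG_XAX, 0
jmp epilog27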
@@ -54,7 +54,9 @@ type Options struct {
func newOptions() *Options {
return &Options{
- config: new(AgentConfig),
+ config: &AgentConfig{
+ EnablePrometheusMetrics: true,
+ },
}
}
| 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"io/ioutil"
"net"
"strings"
"time"
"github.com/spf13/pflag"
"gopkg.in/yaml.v2"
"github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/apis"
"github.com/vmware-tanzu/antrea/pkg/cni"
"github.com/vmware-tanzu/antrea/pkg/features"
"github.com/vmware-tanzu/antrea/pkg/ovs/ovsconfig"
)
const (
defaultOVSBridge = "br-int"
defaultHostGateway = "antrea-gw0"
defaultHostProcPathPrefix = "/host"
defaultServiceCIDR = "10.96.0.0/12"
defaultTunnelType = ovsconfig.GeneveTunnel
defaultFlowPollInterval = 5 * time.Second
defaultFlowExportFrequency = 12
)
type Options struct {
// The path of configuration file.
configFile string
// The configuration object
config *AgentConfig
// IPFIX flow collector
flowCollector net.Addr
// Flow exporter poll interval
pollInterval time.Duration
}
func newOptions() *Options {
return &Options{
config: new(AgentConfig),
}
}
// addFlags adds flags to fs and binds them to options.
func (o *Options) addFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.configFile, "config", o.configFile, "The path to the configuration file")
}
// complete completes all the required options.
func (o *Options) complete(args []string) error {
if len(o.configFile) > 0 {
c, err := o.loadConfigFromFile(o.configFile)
if err != nil {
return err
}
o.config = c
}
o.setDefaults()
return features.DefaultMutableFeatureGate.SetFromMap(o.config.FeatureGates)
}
// validate validates all the required options. It must be called after complete.
func (o *Options) validate(args []string) error {
if len(args) != 0 {
return fmt.Errorf("no positional arguments are supported")
}
// Validate service CIDR configuration
_, _, err := net.ParseCIDR(o.config.ServiceCIDR)
if err != nil {
return fmt.Errorf("Service CIDR %s is invalid", o.config.ServiceCIDR)
}
if o.config.ServiceCIDRv6 != "" {
_, _, err := net.ParseCIDR(o.config.ServiceCIDRv6)
if err != nil {
return fmt.Errorf("Service CIDR v6 %s is invalid", o.config.ServiceCIDRv6)
}
}
if o.config.TunnelType != ovsconfig.VXLANTunnel && o.config.TunnelType != ovsconfig.GeneveTunnel &&
o.config.TunnelType != ovsconfig.GRETunnel && o.config.TunnelType != ovsconfig.STTTunnel {
return fmt.Errorf("tunnel type %s is invalid", o.config.TunnelType)
}
if o.config.EnableIPSecTunnel && o.config.TunnelType != ovsconfig.GRETunnel {
return fmt.Errorf("IPSec encyption is supported only for GRE tunnel")
}
if o.config.OVSDatapathType != ovsconfig.OVSDatapathSystem && o.config.OVSDatapathType != ovsconfig.OVSDatapathNetdev {
return fmt.Errorf("OVS datapath type %s is not supported", o.config.OVSDatapathType)
}
ok, encapMode := config.GetTrafficEncapModeFromStr(o.config.TrafficEncapMode)
if !ok {
return fmt.Errorf("TrafficEncapMode %s is unknown", o.config.TrafficEncapMode)
}
// Check if the enabled features are supported on the OS.
err = o.checkUnsupportedFeatures()
if err != nil {
return err
}
if encapMode.SupportsNoEncap() {
if !features.DefaultFeatureGate.Enabled(features.AntreaProxy) {
return fmt.Errorf("TrafficEncapMode %s requires AntreaProxy to be enabled", o.config.TrafficEncapMode)
}
if o.config.EnableIPSecTunnel {
return fmt.Errorf("IPsec tunnel may only be enabled in %s mode", config.TrafficEncapModeEncap)
}
}
if o.config.NoSNAT && !(encapMode == config.TrafficEncapModeNoEncap || encapMode == config.TrafficEncapModeNetworkPolicyOnly) {
return fmt.Errorf("noSNAT is only applicable to the %s mode", config.TrafficEncapModeNoEncap)
}
if encapMode == config.TrafficEncapModeNetworkPolicyOnly {
// In the NetworkPolicyOnly mode, Antrea will not perform SNAT
// (but SNAT can be done by the primary CNI).
o.config.NoSNAT = true
}
if err := o.validateFlowExporterConfig(); err != nil {
return fmt.Errorf("failed to validate flow exporter config: %v", err)
}
return nil
}
func (o *Options) loadConfigFromFile(file string) (*AgentConfig, error) {
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
var c AgentConfig
err = yaml.UnmarshalStrict(data, &c)
if err != nil {
return nil, err
}
return &c, nil
}
func (o *Options) setDefaults() {
if o.config.CNISocket == "" {
o.config.CNISocket = cni.AntreaCNISocketAddr
}
if o.config.OVSBridge == "" {
o.config.OVSBridge = defaultOVSBridge
}
if o.config.OVSDatapathType == "" {
o.config.OVSDatapathType = ovsconfig.OVSDatapathSystem
}
if o.config.OVSRunDir == "" {
o.config.OVSRunDir = ovsconfig.DefaultOVSRunDir
}
if o.config.HostGateway == "" {
o.config.HostGateway = defaultHostGateway
}
if o.config.TrafficEncapMode == "" {
o.config.TrafficEncapMode = config.TrafficEncapModeEncap.String()
}
if o.config.TunnelType == "" {
o.config.TunnelType = defaultTunnelType
}
if o.config.HostProcPathPrefix == "" {
o.config.HostProcPathPrefix = defaultHostProcPathPrefix
}
if o.config.ServiceCIDR == "" {
o.config.ServiceCIDR = defaultServiceCIDR
}
if o.config.APIPort == 0 {
o.config.APIPort = apis.AntreaAgentAPIPort
}
if o.config.FeatureGates[string(features.FlowExporter)] {
if o.config.FlowPollInterval == "" {
o.pollInterval = defaultFlowPollInterval
}
if o.config.FlowExportFrequency == 0 {
// This frequency value makes flow export interval as 60s by default.
o.config.FlowExportFrequency = defaultFlowExportFrequency
}
}
}
func (o *Options) validateFlowExporterConfig() error {
if features.DefaultFeatureGate.Enabled(features.FlowExporter) {
if o.config.FlowCollectorAddr == "" {
return fmt.Errorf("IPFIX flow collector address should be provided")
} else {
// Check if it is TCP or UDP
strSlice := strings.Split(o.config.FlowCollectorAddr, ":")
var proto string
if len(strSlice) == 2 {
// If only host:port is given (no proto part), then default to TCP.
proto = "tcp"
} else if len(strSlice) > 2 {
if (strSlice[2] != "udp") && (strSlice[2] != "tcp") {
return fmt.Errorf("IPFIX flow collector over %s proto is not supported", strSlice[2])
}
proto = strSlice[2]
} else {
return fmt.Errorf("IPFIX flow collector is given in invalid format")
}
// Convert the string input in net.Addr format
hostPortAddr := strSlice[0] + ":" + strSlice[1]
_, _, err := net.SplitHostPort(hostPortAddr)
if err != nil {
return fmt.Errorf("IPFIX flow collector is given in invalid format: %v", err)
}
if proto == "udp" {
o.flowCollector, err = net.ResolveUDPAddr("udp", hostPortAddr)
if err != nil {
return fmt.Errorf("IPFIX flow collector over UDP proto cannot be resolved: %v", err)
}
} else {
o.flowCollector, err = net.ResolveTCPAddr("tcp", hostPortAddr)
if err != nil {
return fmt.Errorf("IPFIX flow collector over TCP proto cannot be resolved: %v", err)
}
}
}
if o.config.FlowPollInterval != "" {
var err error
o.pollInterval, err = time.ParseDuration(o.config.FlowPollInterval)
if err != nil {
return fmt.Errorf("FlowPollInterval is not provided in right format: %v", err)
}
if o.pollInterval < time.Second {
return fmt.Errorf("FlowPollInterval should be greater than or equal to one second")
}
}
}
return nil
}
| 1 | 28,539 | I remember there was some misleading code around this but forgot to correct it. Although we initialize `config` here, it is discarded in `complete`, so setting the default value here doesn't take effect. We should change L139-L140 as well to use the initialized `config`. The same applies to antrea-controller. | antrea-io-antrea | go |
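A minimal sketch of the change the reviewer is hinting at: because `complete` replaces the defaulted `o.config` with a struct freshly unmarshalled by `loadConfigFromFile`, a default set in `newOptions` is lost whenever a config file is supplied. Decoding on top of the already-initialized config keeps defaults such as `EnablePrometheusMetrics` for fields absent from the YAML. The changed signature below is illustrative only, not the actual Antrea patch, and it reuses the imports and types from the file above.

// Hypothetical variant of loadConfigFromFile: fill the existing config rather
// than returning a new one, so defaults set in newOptions() survive.
func (o *Options) loadConfigFromFile(file string) error {
    data, err := ioutil.ReadFile(file)
    if err != nil {
        return err
    }
    // UnmarshalStrict only overwrites fields that are present in the file.
    return yaml.UnmarshalStrict(data, o.config)
}

// complete() would then call it without discarding o.config:
//   if len(o.configFile) > 0 {
//       if err := o.loadConfigFromFile(o.configFile); err != nil {
//           return err
//       }
//   }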
@@ -364,4 +364,13 @@ describe RSpec::Core::Formatters::BaseTextFormatter do
output.string.should =~ /, 100.0% of total time\):/
end
end
+
+ describe "custom_colors" do
+ before { RSpec.configuration.stub(:color_enabled?) { true } }
+ it "uses the custom success color" do
+ RSpec.configuration.stub(:success_color).and_return(:cyan)
+ formatter.dump_summary(0,1,0,0)
+ output.string.should include("\e[36m")
+ end
+ end
end | 1 | require 'spec_helper'
require 'rspec/core/formatters/base_text_formatter'
describe RSpec::Core::Formatters::BaseTextFormatter do
let(:output) { StringIO.new }
let(:formatter) { RSpec::Core::Formatters::BaseTextFormatter.new(output) }
describe "#summary_line" do
it "with 0s outputs pluralized (excluding pending)" do
formatter.summary_line(0,0,0).should eq("0 examples, 0 failures")
end
it "with 1s outputs singular (including pending)" do
formatter.summary_line(1,1,1).should eq("1 example, 1 failure, 1 pending")
end
it "with 2s outputs pluralized (including pending)" do
formatter.summary_line(2,2,2).should eq("2 examples, 2 failures, 2 pending")
end
end
describe "#dump_commands_to_rerun_failed_examples" do
it "includes command to re-run each failed example" do
group = RSpec::Core::ExampleGroup.describe("example group") do
it("fails") { fail }
end
line = __LINE__ - 2
group.run(formatter)
formatter.dump_commands_to_rerun_failed_examples
output.string.should include("rspec #{RSpec::Core::Metadata::relative_path("#{__FILE__}:#{line}")} # example group fails")
end
end
describe "#dump_failures" do
let(:group) { RSpec::Core::ExampleGroup.describe("group name") }
before { RSpec.configuration.stub(:color_enabled?) { false } }
def run_all_and_dump_failures
group.run(formatter)
formatter.dump_failures
end
it "preserves formatting" do
group.example("example name") { "this".should eq("that") }
run_all_and_dump_failures
output.string.should =~ /group name example name/m
output.string.should =~ /(\s+)expected: \"that\"\n\1 got: \"this\"/m
end
context "with an exception without a message" do
it "does not throw NoMethodError" do
exception_without_message = Exception.new()
exception_without_message.stub(:message) { nil }
group.example("example name") { raise exception_without_message }
expect { run_all_and_dump_failures }.not_to raise_error(NoMethodError)
end
it "preserves ancestry" do
example = group.example("example name") { raise "something" }
run_all_and_dump_failures
example.example_group.parent_groups.size.should == 1
end
end
context "with an exception that has an exception instance as its message" do
it "does not raise NoMethodError" do
gonzo_exception = RuntimeError.new
gonzo_exception.stub(:message) { gonzo_exception }
group.example("example name") { raise gonzo_exception }
expect { run_all_and_dump_failures }.not_to raise_error(NoMethodError)
end
end
context "with an exception class other than RSpec" do
it "does not show the error class" do
group.example("example name") { raise NameError.new('foo') }
run_all_and_dump_failures
output.string.should =~ /NameError/m
end
end
context "with a failed expectation (rspec-expectations)" do
it "does not show the error class" do
group.example("example name") { "this".should eq("that") }
run_all_and_dump_failures
output.string.should_not =~ /RSpec/m
end
end
context "with a failed message expectation (rspec-mocks)" do
it "does not show the error class" do
group.example("example name") { "this".should_receive("that") }
run_all_and_dump_failures
output.string.should_not =~ /RSpec/m
end
end
context 'for #share_examples_for' do
it 'outputs the name and location' do
share_examples_for 'foo bar' do
it("example name") { "this".should eq("that") }
end
line = __LINE__.next
group.it_should_behave_like('foo bar')
run_all_and_dump_failures
output.string.should include(
'Shared Example Group: "foo bar" called from ' +
"./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{line}"
)
end
context 'that contains nested example groups' do
it 'outputs the name and location' do
share_examples_for 'foo bar' do
describe 'nested group' do
it("example name") { "this".should eq("that") }
end
end
line = __LINE__.next
group.it_should_behave_like('foo bar')
run_all_and_dump_failures
output.string.should include(
'Shared Example Group: "foo bar" called from ' +
"./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{line}"
)
end
end
end
context 'for #share_as' do
before { RSpec.stub(:warn) }
it 'outputs the name and location' do
share_as :FooBar do
it("example name") { "this".should eq("that") }
end
line = __LINE__.next
group.send(:include, FooBar)
run_all_and_dump_failures
output.string.should include(
'Shared Example Group: "FooBar" called from ' +
"./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{line}"
)
end
context 'that contains nested example groups' do
it 'outputs the name and location' do
share_as :NestedFoo do
describe 'nested group' do
describe 'hell' do
it("example name") { "this".should eq("that") }
end
end
end
line = __LINE__.next
group.send(:include, NestedFoo)
run_all_and_dump_failures
output.string.should include(
'Shared Example Group: "NestedFoo" called from ' +
"./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{line}"
)
end
end
end
end
describe "#dump_pending" do
let(:group) { RSpec::Core::ExampleGroup.describe("group name") }
before { RSpec.configuration.stub(:color_enabled?) { false } }
def run_all_and_dump_pending
group.run(formatter)
formatter.dump_pending
end
context "with show_failures_in_pending_blocks setting enabled" do
before { RSpec.configuration.stub(:show_failures_in_pending_blocks?) { true } }
it "preserves formatting" do
group.example("example name") { pending { "this".should eq("that") } }
run_all_and_dump_pending
output.string.should =~ /group name example name/m
output.string.should =~ /(\s+)expected: \"that\"\n\1 got: \"this\"/m
end
context "with an exception without a message" do
it "does not throw NoMethodError" do
exception_without_message = Exception.new()
exception_without_message.stub(:message) { nil }
group.example("example name") { pending { raise exception_without_message } }
expect { run_all_and_dump_pending }.not_to raise_error(NoMethodError)
end
end
context "with an exception class other than RSpec" do
it "does not show the error class" do
group.example("example name") { pending { raise NameError.new('foo') } }
run_all_and_dump_pending
output.string.should =~ /NameError/m
end
end
context "with a failed expectation (rspec-expectations)" do
it "does not show the error class" do
group.example("example name") { pending { "this".should eq("that") } }
run_all_and_dump_pending
output.string.should_not =~ /RSpec/m
end
end
context "with a failed message expectation (rspec-mocks)" do
it "does not show the error class" do
group.example("example name") { pending { "this".should_receive("that") } }
run_all_and_dump_pending
output.string.should_not =~ /RSpec/m
end
end
context 'for #share_examples_for' do
it 'outputs the name and location' do
share_examples_for 'foo bar' do
it("example name") { pending { "this".should eq("that") } }
end
line = __LINE__.next
group.it_should_behave_like('foo bar')
run_all_and_dump_pending
output.string.should include(
'Shared Example Group: "foo bar" called from ' +
"./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{line}"
)
end
context 'that contains nested example groups' do
it 'outputs the name and location' do
share_examples_for 'foo bar' do
describe 'nested group' do
it("example name") { pending { "this".should eq("that") } }
end
end
line = __LINE__.next
group.it_should_behave_like('foo bar')
run_all_and_dump_pending
output.string.should include(
'Shared Example Group: "foo bar" called from ' +
"./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{line}"
)
end
end
end
context 'for #share_as' do
before { RSpec.stub(:warn) }
it 'outputs the name and location' do
share_as :FooBar2 do
it("example name") { pending { "this".should eq("that") } }
end
line = __LINE__.next
group.send(:include, FooBar2)
run_all_and_dump_pending
output.string.should include(
'Shared Example Group: "FooBar2" called from ' +
"./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{line}"
)
end
context 'that contains nested example groups' do
it 'outputs the name and location' do
share_as :NestedFoo2 do
describe 'nested group' do
describe 'hell' do
it("example name") { pending { "this".should eq("that") } }
end
end
end
line = __LINE__.next
group.send(:include, NestedFoo2)
run_all_and_dump_pending
output.string.should include(
'Shared Example Group: "NestedFoo2" called from ' +
"./spec/rspec/core/formatters/base_text_formatter_spec.rb:#{line}"
)
end
end
end
end
context "with show_failures_in_pending_blocks setting disabled" do
before { RSpec.configuration.stub(:show_failures_in_pending_blocks?) { false } }
it "does not output the failure information" do
group.example("example name") { pending { "this".should eq("that") } }
run_all_and_dump_pending
output.string.should_not =~ /(\s+)expected: \"that\"\n\1 got: \"this\"/m
end
end
end
describe "#dump_profile" do
before do
group = RSpec::Core::ExampleGroup.describe("group") do
example("example") { sleep 0.1 }
end
group.run(double('reporter').as_null_object)
formatter.stub(:examples) { group.examples }
end
it "names the example" do
formatter.dump_profile
output.string.should =~ /group example/m
end
it "prints the time" do
formatter.dump_profile
output.string.should =~ /0(\.\d+)? seconds/
end
it "prints the path" do
formatter.dump_profile
filename = __FILE__.split(File::SEPARATOR).last
output.string.should =~ /#{filename}\:#{__LINE__ - 21}/
end
it "prints the percentage taken from the total runtime" do
formatter.dump_profile
output.string.should =~ /, 100.0% of total time\):/
end
end
end
| 1 | 8,207 | Stubbing a value object (like `RSpec.configuration`) is a bit of a code smell, I think. You can just set `color_enabled` and `success_color` through the configuration APIs provided by `RSpec::Configuration` -- no need to stub. The `sandboxed` thing in `spec_helper.rb` takes care of preventing changes to the configuration object in one example from leaking into other examples. | rspec-rspec-core | rb |
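A sketch of the alternative the reviewer describes: set the values through the configuration object and let the `sandboxed` helper in `spec_helper.rb` roll them back after each example. The setter names (`color_enabled=`, `tty=`, `success_color=`) are assumptions taken from the comment and the diff, not verified against this rspec-core version; `tty = true` is included because color is normally only reported as enabled for a TTY.

describe "custom_colors" do
  before do
    # Real configuration values instead of stubs on the value object;
    # sandboxing restores RSpec.configuration after each example.
    RSpec.configuration.color_enabled = true
    RSpec.configuration.tty = true
    RSpec.configuration.success_color = :cyan
  end

  it "uses the custom success color" do
    formatter.dump_summary(0, 1, 0, 0)
    output.string.should include("\e[36m")
  end
end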
@@ -624,9 +624,14 @@ API.prototype.getBalanceFromPrivateKey = function(privateKey, coin, cb) {
var B = Bitcore_[coin];
var privateKey = new B.PrivateKey(privateKey);
- var address = privateKey.publicKey.toAddress();
+
+ var address = privateKey.publicKey.toAddress().toString();
+ if (coin == 'bch') {
+ address = privateKey.publicKey.toAddress().toCashAddress().replace('bitcoincash:', '');
+ }
+
self.getUtxos({
- addresses: coin == 'bch' ? address.toLegacyAddress() : address.toString(),
+ addresses: address,
}, function(err, utxos) {
if (err) return cb(err);
return cb(null, _.sumBy(utxos, 'satoshis')); | 1 | 'use strict';
var _ = require('lodash');
var $ = require('preconditions').singleton();
var util = require('util');
var async = require('async');
var events = require('events');
var Bitcore = require('bitcore-lib');
var Bitcore_ = {
btc: Bitcore,
bch: require('bitcore-lib-cash'),
};
var Mnemonic = require('bitcore-mnemonic');
var sjcl = require('sjcl');
var url = require('url');
var querystring = require('querystring');
var Common = require('./common');
var Constants = Common.Constants;
var Defaults = Common.Defaults;
var Utils = Common.Utils;
var PayPro = require('./paypro');
var log = require('./log');
var Credentials = require('./credentials');
var Verifier = require('./verifier');
var Errors = require('./errors');
const Request = require('./request');
var BASE_URL = 'http://localhost:3232/bws/api';
/**
* @desc ClientAPI constructor.
*
* @param {Object} opts
* @constructor
*/
function API(opts) {
opts = opts || {};
this.doNotVerifyPayPro = opts.doNotVerifyPayPro;
this.timeout = opts.timeout || 50000;
this.logLevel = opts.logLevel || 'silent';
this.supportStaffWalletId = opts.supportStaffWalletId;
this.request = new Request(opts.baseUrl || BASE_URL, {r: opts.request});
log.setLevel(this.logLevel);
};
util.inherits(API, events.EventEmitter);
API.privateKeyEncryptionOpts = {
iter: 10000
};
API.prototype.initNotifications = function(cb) {
log.warn('DEPRECATED: use initialize() instead.');
this.initialize({}, cb);
};
API.prototype.initialize = function(opts, cb) {
$.checkState(this.credentials);
var self = this;
self.notificationIncludeOwn = !!opts.notificationIncludeOwn;
self._initNotifications(opts);
return cb();
};
API.prototype.dispose = function(cb) {
var self = this;
self._disposeNotifications();
self.request.logout(cb);
};
API.prototype._fetchLatestNotifications = function(interval, cb) {
var self = this;
cb = cb || function() {};
var opts = {
lastNotificationId: self.lastNotificationId,
includeOwn: self.notificationIncludeOwn,
};
if (!self.lastNotificationId) {
opts.timeSpan = interval + 1;
}
self.getNotifications(opts, function(err, notifications) {
if (err) {
log.warn('Error receiving notifications.');
log.debug(err);
return cb(err);
}
if (notifications.length > 0) {
self.lastNotificationId = _.last(notifications).id;
}
_.each(notifications, function(notification) {
self.emit('notification', notification);
});
return cb();
});
};
API.prototype._initNotifications = function(opts) {
var self = this;
opts = opts || {};
var interval = opts.notificationIntervalSeconds || 5;
self.notificationsIntervalId = setInterval(function() {
self._fetchLatestNotifications(interval, function(err) {
if (err) {
if (err instanceof Errors.NOT_FOUND || err instanceof Errors.NOT_AUTHORIZED) {
self._disposeNotifications();
}
}
});
}, interval * 1000);
};
API.prototype._disposeNotifications = function() {
var self = this;
if (self.notificationsIntervalId) {
clearInterval(self.notificationsIntervalId);
self.notificationsIntervalId = null;
}
};
/**
* Reset notification polling with new interval
* @param {Numeric} notificationIntervalSeconds - use 0 to pause notifications
*/
API.prototype.setNotificationsInterval = function(notificationIntervalSeconds) {
var self = this;
self._disposeNotifications();
if (notificationIntervalSeconds > 0) {
self._initNotifications({
notificationIntervalSeconds: notificationIntervalSeconds
});
}
};
/**
* Encrypt a message
* @private
* @static
* @memberof Client.API
* @param {String} message
* @param {String} encryptingKey
*/
API._encryptMessage = function(message, encryptingKey) {
if (!message) return null;
return Utils.encryptMessage(message, encryptingKey);
};
API.prototype._processTxNotes = function(notes) {
var self = this;
if (!notes) return;
var encryptingKey = self.credentials.sharedEncryptingKey;
_.each([].concat(notes), function(note) {
note.encryptedBody = note.body;
note.body = Utils.decryptMessageNoThrow(note.body, encryptingKey);
note.encryptedEditedByName = note.editedByName;
note.editedByName = Utils.decryptMessageNoThrow(note.editedByName, encryptingKey);
});
};
/**
* Decrypt text fields in transaction proposals
* @private
* @static
* @memberof Client.API
* @param {Array} txps
* @param {String} encryptingKey
*/
API.prototype._processTxps = function(txps) {
var self = this;
if (!txps) return;
var encryptingKey = self.credentials.sharedEncryptingKey;
_.each([].concat(txps), function(txp) {
txp.encryptedMessage = txp.message;
txp.message = Utils.decryptMessageNoThrow(txp.message, encryptingKey) || null;
txp.creatorName = Utils.decryptMessageNoThrow(txp.creatorName, encryptingKey);
_.each(txp.actions, function(action) {
// CopayerName encryption is optional (not available in older wallets)
action.copayerName = Utils.decryptMessageNoThrow(action.copayerName, encryptingKey);
action.comment = Utils.decryptMessageNoThrow(action.comment, encryptingKey);
// TODO get copayerName from Credentials -> copayerId to copayerName
// action.copayerName = null;
});
_.each(txp.outputs, function(output) {
output.encryptedMessage = output.message;
output.message = Utils.decryptMessageNoThrow(output.message, encryptingKey) || null;
});
txp.hasUnconfirmedInputs = _.some(txp.inputs, function(input) {
return input.confirmations == 0;
});
self._processTxNotes(txp.note);
});
};
/**
* Seed from random
*
* @param {Object} opts
* @param {String} opts.coin - default 'btc'
* @param {String} opts.network - default 'livenet'
*/
API.prototype.seedFromRandom = function(opts) {
$.checkArgument(arguments.length <= 1, 'DEPRECATED: only 1 argument accepted.');
$.checkArgument(_.isUndefined(opts) || _.isObject(opts), 'DEPRECATED: argument should be an options object.');
opts = opts || {};
this.credentials = Credentials.create(opts.coin || 'btc', opts.network || 'livenet');
this.request.setCredentials(this.credentials);
};
var _deviceValidated;
/**
* Seed from random
*
* @param {Object} opts
* @param {String} opts.passphrase
* @param {Boolean} opts.skipDeviceValidation
*/
API.prototype.validateKeyDerivation = function(opts, cb) {
var self = this;
opts = opts || {};
var c = self.credentials;
function testMessageSigning(xpriv, xpub) {
var nonHardenedPath = 'm/0/0';
var message = 'Lorem ipsum dolor sit amet, ne amet urbanitas percipitur vim, libris disputando his ne, et facer suavitate qui. Ei quidam laoreet sea. Cu pro dico aliquip gubergren, in mundi postea usu. Ad labitur posidonium interesset duo, est et doctus molestie adipiscing.';
var priv = xpriv.deriveChild(nonHardenedPath).privateKey;
var signature = Utils.signMessage(message, priv);
var pub = xpub.deriveChild(nonHardenedPath).publicKey;
return Utils.verifyMessage(message, signature, pub);
};
function testHardcodedKeys() {
var words = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about";
var xpriv = Mnemonic(words).toHDPrivateKey();
if (xpriv.toString() != 'xprv9s21ZrQH143K3GJpoapnV8SFfukcVBSfeCficPSGfubmSFDxo1kuHnLisriDvSnRRuL2Qrg5ggqHKNVpxR86QEC8w35uxmGoggxtQTPvfUu') return false;
xpriv = xpriv.deriveChild("m/44'/0'/0'");
if (xpriv.toString() != 'xprv9xpXFhFpqdQK3TmytPBqXtGSwS3DLjojFhTGht8gwAAii8py5X6pxeBnQ6ehJiyJ6nDjWGJfZ95WxByFXVkDxHXrqu53WCRGypk2ttuqncb') return false;
var xpub = Bitcore.HDPublicKey.fromString('xpub6BosfCnifzxcFwrSzQiqu2DBVTshkCXacvNsWGYJVVhhawA7d4R5WSWGFNbi8Aw6ZRc1brxMyWMzG3DSSSSoekkudhUd9yLb6qx39T9nMdj');
return testMessageSigning(xpriv, xpub);
};
function testLiveKeys() {
var words;
try {
words = c.getMnemonic();
} catch (ex) {}
var xpriv;
if (words && (!c.mnemonicHasPassphrase || opts.passphrase)) {
var m = new Mnemonic(words);
xpriv = m.toHDPrivateKey(opts.passphrase, c.network);
}
if (!xpriv) {
xpriv = new Bitcore.HDPrivateKey(c.xPrivKey);
}
xpriv = xpriv.deriveChild(c.getBaseAddressDerivationPath());
var xpub = new Bitcore.HDPublicKey(c.xPubKey);
return testMessageSigning(xpriv, xpub);
};
var hardcodedOk = true;
if (!_deviceValidated && !opts.skipDeviceValidation) {
hardcodedOk = testHardcodedKeys();
_deviceValidated = true;
}
var liveOk = (c.canSign() && !c.isPrivKeyEncrypted()) ? testLiveKeys() : true;
self.keyDerivationOk = hardcodedOk && liveOk;
return cb(null, self.keyDerivationOk);
};
/**
* Seed from random with mnemonic
*
* @param {Object} opts
* @param {String} opts.coin - default 'btc'
* @param {String} opts.network - default 'livenet'
* @param {String} opts.passphrase
* @param {Number} opts.language - default 'en'
* @param {Number} opts.account - default 0
*/
API.prototype.seedFromRandomWithMnemonic = function(opts) {
$.checkArgument(arguments.length <= 1, 'DEPRECATED: only 1 argument accepted.');
$.checkArgument(_.isUndefined(opts) || _.isObject(opts), 'DEPRECATED: argument should be an options object.');
opts = opts || {};
this.credentials = Credentials.createWithMnemonic(opts.coin || 'btc', opts.network || 'livenet', opts.passphrase, opts.language || 'en', opts.account || 0);
this.request.setCredentials(this.credentials);
};
API.prototype.getMnemonic = function() {
return this.credentials.getMnemonic();
};
API.prototype.mnemonicHasPassphrase = function() {
return this.credentials.mnemonicHasPassphrase;
};
API.prototype.clearMnemonic = function() {
return this.credentials.clearMnemonic();
};
/**
* Seed from extended private key
*
* @param {String} xPrivKey
* @param {String} opts.coin - default 'btc'
* @param {Number} opts.account - default 0
* @param {String} opts.derivationStrategy - default 'BIP44'
*/
API.prototype.seedFromExtendedPrivateKey = function(xPrivKey, opts) {
opts = opts || {};
this.credentials = Credentials.fromExtendedPrivateKey(opts.coin || 'btc', xPrivKey, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44, opts);
this.request.setCredentials(this.credentials);
};
/**
* Seed from Mnemonics (language autodetected)
* Can throw an error if mnemonic is invalid
*
* @param {String} BIP39 words
* @param {Object} opts
* @param {String} opts.coin - default 'btc'
* @param {String} opts.network - default 'livenet'
* @param {String} opts.passphrase
* @param {Number} opts.account - default 0
* @param {String} opts.derivationStrategy - default 'BIP44'
*/
API.prototype.seedFromMnemonic = function(words, opts) {
$.checkArgument(_.isUndefined(opts) || _.isObject(opts), 'DEPRECATED: second argument should be an options object.');
opts = opts || {};
this.credentials = Credentials.fromMnemonic(opts.coin || 'btc', opts.network || 'livenet', words, opts.passphrase, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44, opts);
this.request.setCredentials(this.credentials);
};
/**
* Seed from external wallet public key
*
* @param {String} xPubKey
* @param {String} source - A name identifying the source of the xPrivKey (e.g. ledger, TREZOR, ...)
* @param {String} entropySourceHex - A HEX string containing pseudo-random data, that can be deterministically derived from the xPrivKey, and should not be derived from xPubKey.
* @param {Object} opts
* @param {String} opts.coin - default 'btc'
* @param {Number} opts.account - default 0
* @param {String} opts.derivationStrategy - default 'BIP44'
*/
API.prototype.seedFromExtendedPublicKey = function(xPubKey, source, entropySourceHex, opts) {
$.checkArgument(_.isUndefined(opts) || _.isObject(opts));
opts = opts || {};
this.credentials = Credentials.fromExtendedPublicKey(opts.coin || 'btc', xPubKey, source, entropySourceHex, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44);
this.request.setCredentials(this.credentials);
};
/**
* Export wallet
*
* @param {Object} opts
* @param {Boolean} opts.password
* @param {Boolean} opts.noSign
*/
API.prototype.export = function(opts) {
$.checkState(this.credentials);
opts = opts || {};
var output;
var c = Credentials.fromObj(this.credentials);
if (opts.noSign) {
c.setNoSign();
} else if (opts.password) {
c.decryptPrivateKey(opts.password);
}
output = JSON.stringify(c.toObj());
return output;
};
/**
* Import wallet
*
* @param {Object} str - The serialized JSON created with #export
*/
API.prototype.import = function(str) {
try {
var credentials = Credentials.fromObj(JSON.parse(str));
this.credentials = credentials;
} catch (ex) {
throw new Errors.INVALID_BACKUP;
}
this.request.setCredentials(this.credentials);
};
API.prototype._import = function(cb) {
$.checkState(this.credentials);
var self = this;
// First option, grab wallet info from BWS.
self.openWallet(function(err, ret) {
// it worked?
if (!err) return cb(null, ret);
// Is the error other than "copayer was not found"? || or no priv key.
if (err instanceof Errors.NOT_AUTHORIZED || self.isPrivKeyExternal())
return cb(err);
//Second option, lets try to add an access
log.info('Copayer not found, trying to add access');
self.addAccess({}, function(err) {
if (err) {
return cb(new Errors.WALLET_DOES_NOT_EXIST);
}
self.openWallet(cb);
});
});
};
/**
* Import from Mnemonics (language autodetected)
* Can throw an error if mnemonic is invalid
* Will try compliant and non-compliantDerivation
*
* @param {String} BIP39 words
* @param {Object} opts
* @param {String} opts.coin - default 'btc'
* @param {String} opts.network - default 'livenet'
* @param {String} opts.passphrase
* @param {Number} opts.account - default 0
* @param {String} opts.derivationStrategy - default 'BIP44'
* @param {String} opts.entropySourcePath - Only used if the wallet was created on a HW wallet, in which that private keys was not available for all the needed derivations
* @param {String} opts.walletPrivKey - if available, walletPrivKey for encrypting metadata
*/
API.prototype.importFromMnemonic = function(words, opts, cb) {
log.debug('Importing from Mnemonic');
var self = this;
opts = opts || {};
opts.coin = opts.coin || 'btc';
function derive(nonCompliantDerivation, useLegacyCoinType) {
return Credentials.fromMnemonic(opts.coin, opts.network || 'livenet', words, opts.passphrase, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44, {
nonCompliantDerivation: nonCompliantDerivation,
entropySourcePath: opts.entropySourcePath,
walletPrivKey: opts.walletPrivKey,
useLegacyCoinType,
});
};
try {
self.credentials = derive();
} catch (e) {
log.info('Mnemonic error:', e);
return cb(new Errors.INVALID_BACKUP);
}
this.request.setCredentials(this.credentials);
self._import(function(err, ret) {
if (!err) return cb(null, ret);
if (err instanceof Errors.INVALID_BACKUP) return cb(err);
if (err instanceof Errors.NOT_AUTHORIZED || err instanceof Errors.WALLET_DOES_NOT_EXIST) {
var altCredentials;
// Only BTC wallets can be nonCompliantDerivation
switch(opts.coin) {
case 'btc':
// try using nonCompliantDerivation
altCredentials = derive(true);
break;
case 'bch':
// try using 0 as coin for BCH (old wallets)
altCredentials = derive(false, true);
break;
default:
return cb(err);
}
if (altCredentials.xPubKey.toString() == self.credentials.xPubKey.toString())
return cb(err);
self.credentials = altCredentials;
self.request.setCredentials(self.credentials);
return self._import(cb);
}
return cb(err);
});
};
/*
* Import from extended private key
*
* @param {String} xPrivKey
* @param {String} opts.coin - default 'btc'
* @param {Number} opts.account - default 0
* @param {String} opts.derivationStrategy - default 'BIP44'
* @param {String} opts.compliantDerivation - default 'true'
* @param {String} opts.walletPrivKey - if available, walletPrivKey for encrypting metadata
* @param {Callback} cb - The callback that handles the response. It returns a flag indicating that the wallet is imported.
*/
API.prototype.importFromExtendedPrivateKey = function(xPrivKey, opts, cb) {
log.debug('Importing from Extended Private Key');
if (!cb) {
cb = opts;
opts = {};
log.warn('DEPRECATED WARN: importFromExtendedPrivateKey should receive 3 parameters.');
}
try {
this.credentials = Credentials.fromExtendedPrivateKey(opts.coin || 'btc', xPrivKey, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44, opts);
} catch (e) {
log.info('xPriv error:', e);
return cb(new Errors.INVALID_BACKUP);
};
this.request.setCredentials(this.credentials);
this._import(cb);
};
/**
* Import from Extended Public Key
*
* @param {String} xPubKey
* @param {String} source - A name identifying the source of the xPrivKey
* @param {String} entropySourceHex - A HEX string containing pseudo-random data, that can be deterministically derived from the xPrivKey, and should not be derived from xPubKey.
* @param {Object} opts
* @param {String} opts.coin - default 'btc'
* @param {Number} opts.account - default 0
* @param {String} opts.derivationStrategy - default 'BIP44'
* @param {String} opts.compliantDerivation - default 'true'
*/
API.prototype.importFromExtendedPublicKey = function(xPubKey, source, entropySourceHex, opts, cb) {
$.checkArgument(arguments.length == 5, "DEPRECATED: should receive 5 arguments");
$.checkArgument(_.isUndefined(opts) || _.isObject(opts));
$.shouldBeFunction(cb);
opts = opts || {};
log.debug('Importing from Extended Private Key');
try {
this.credentials = Credentials.fromExtendedPublicKey(opts.coin || 'btc', xPubKey, source, entropySourceHex, opts.account || 0, opts.derivationStrategy || Constants.DERIVATION_STRATEGIES.BIP44, opts);
} catch (e) {
log.info('xPriv error:', e);
return cb(new Errors.INVALID_BACKUP);
};
this.request.setCredentials(this.credentials);
this._import(cb);
};
API.prototype.decryptBIP38PrivateKey = function(encryptedPrivateKeyBase58, passphrase, opts, cb) {
var Bip38 = require('bip38');
var bip38 = new Bip38();
var privateKeyWif;
try {
privateKeyWif = bip38.decrypt(encryptedPrivateKeyBase58, passphrase);
} catch (ex) {
return cb(new Error('Could not decrypt BIP38 private key', ex));
}
var privateKey = new Bitcore.PrivateKey(privateKeyWif);
var address = privateKey.publicKey.toAddress().toString();
var addrBuff = new Buffer(address, 'ascii');
var actualChecksum = Bitcore.crypto.Hash.sha256sha256(addrBuff).toString('hex').substring(0, 8);
var expectedChecksum = Bitcore.encoding.Base58Check.decode(encryptedPrivateKeyBase58).toString('hex').substring(6, 14);
if (actualChecksum != expectedChecksum)
return cb(new Error('Incorrect passphrase'));
return cb(null, privateKeyWif);
};
API.prototype.getBalanceFromPrivateKey = function(privateKey, coin, cb) {
var self = this;
if (_.isFunction(coin)) {
cb = coin;
coin = 'btc';
}
var B = Bitcore_[coin];
var privateKey = new B.PrivateKey(privateKey);
var address = privateKey.publicKey.toAddress();
self.getUtxos({
addresses: coin == 'bch' ? address.toLegacyAddress() : address.toString(),
}, function(err, utxos) {
if (err) return cb(err);
return cb(null, _.sumBy(utxos, 'satoshis'));
});
};
API.prototype.buildTxFromPrivateKey = function(privateKey, destinationAddress, opts, cb) {
var self = this;
opts = opts || {};
var coin = opts.coin || 'btc';
var B = Bitcore_[coin];
var privateKey = B.PrivateKey(privateKey);
var address = privateKey.publicKey.toAddress();
async.waterfall([
function(next) {
self.getUtxos({
addresses: coin == 'bch' ? address.toLegacyAddress() : address.toString(),
}, function(err, utxos) {
return next(err, utxos);
});
},
function(utxos, next) {
if (!_.isArray(utxos) || utxos.length == 0) return next(new Error('No utxos found'));
var fee = opts.fee || 10000;
var amount = _.sumBy(utxos, 'satoshis') - fee;
if (amount <= 0) return next(new Errors.INSUFFICIENT_FUNDS);
var tx;
try {
var toAddress = B.Address.fromString(destinationAddress);
tx = new B.Transaction()
.from(utxos)
.to(toAddress, amount)
.fee(fee)
.sign(privateKey);
// Make sure the tx can be serialized
tx.serialize();
} catch (ex) {
log.error('Could not build transaction from private key', ex);
return next(new Errors.COULD_NOT_BUILD_TRANSACTION);
}
return next(null, tx);
}
], cb);
};
/**
* Open a wallet and try to complete the public key ring.
*
* @param {Callback} cb - The callback that handles the response. It returns a flag indicating that the wallet is complete.
* @fires API#walletCompleted
*/
API.prototype.openWallet = function(cb) {
$.checkState(this.credentials);
var self = this;
if (self.credentials.isComplete() && self.credentials.hasWalletInfo())
return cb(null, true);
var qs = [];
qs.push('includeExtendedInfo=1');
qs.push('serverMessageArray=1');
self.request.get('/v3/wallets/?' + qs.join('&'), function(err, ret) {
if (err) return cb(err);
var wallet = ret.wallet;
self._processStatus(ret);
if (!self.credentials.hasWalletInfo()) {
var me = _.find(wallet.copayers, {
id: self.credentials.copayerId
});
self.credentials.addWalletInfo(wallet.id, wallet.name, wallet.m, wallet.n, me.name);
}
if (wallet.status != 'complete')
return cb();
if (self.credentials.walletPrivKey) {
if (!Verifier.checkCopayers(self.credentials, wallet.copayers)) {
return cb(new Errors.SERVER_COMPROMISED);
}
} else {
// this should only happen in AIR-GAPPED flows
log.warn('Could not verify copayers key (missing wallet Private Key)');
}
self.credentials.addPublicKeyRing(API._extractPublicKeyRing(wallet.copayers));
self.emit('walletCompleted', wallet);
return cb(null, ret);
});
};
API._buildSecret = function(walletId, walletPrivKey, coin, network) {
if (_.isString(walletPrivKey)) {
walletPrivKey = Bitcore.PrivateKey.fromString(walletPrivKey);
}
var widHex = new Buffer(walletId.replace(/-/g, ''), 'hex');
var widBase58 = new Bitcore.encoding.Base58(widHex).toString();
return _.padEnd(widBase58, 22, '0') + walletPrivKey.toWIF() + (network == 'testnet' ? 'T' : 'L') + coin;
};
API.parseSecret = function(secret) {
$.checkArgument(secret);
function split(str, indexes) {
var parts = [];
indexes.push(str.length);
var i = 0;
while (i < indexes.length) {
parts.push(str.substring(i == 0 ? 0 : indexes[i - 1], indexes[i]));
i++;
};
return parts;
};
try {
var secretSplit = split(secret, [22, 74, 75]);
var widBase58 = secretSplit[0].replace(/0/g, '');
var widHex = Bitcore.encoding.Base58.decode(widBase58).toString('hex');
var walletId = split(widHex, [8, 12, 16, 20]).join('-');
var walletPrivKey = Bitcore.PrivateKey.fromString(secretSplit[1]);
var networkChar = secretSplit[2];
var coin = secretSplit[3] || 'btc';
return {
walletId: walletId,
walletPrivKey: walletPrivKey,
coin: coin,
network: networkChar == 'T' ? 'testnet' : 'livenet',
};
} catch (ex) {
throw new Error('Invalid secret');
}
};
API.getRawTx = function(txp) {
var t = Utils.buildTx(txp);
return t.uncheckedSerialize();
};
API.signTxp = function(txp, derivedXPrivKey) {
//Derive proper key to sign, for each input
var privs = [];
var derived = {};
var xpriv = new Bitcore.HDPrivateKey(derivedXPrivKey);
_.each(txp.inputs, function(i) {
$.checkState(i.path, "Input derivation path not available (signing transaction)")
if (!derived[i.path]) {
derived[i.path] = xpriv.deriveChild(i.path).privateKey;
privs.push(derived[i.path]);
}
});
var t = Utils.buildTx(txp);
var signatures = _.map(privs, function(priv, i) {
return t.getSignatures(priv);
});
signatures = _.map(_.sortBy(_.flatten(signatures), 'inputIndex'), function(s) {
return s.signature.toDER().toString('hex');
});
return signatures;
};
API.prototype._signTxp = function(txp, password) {
var derived = this.credentials.getDerivedXPrivKey(password);
return API.signTxp(txp, derived);
};
API.prototype._getCurrentSignatures = function(txp) {
var acceptedActions = _.filter(txp.actions, {
type: 'accept'
});
return _.map(acceptedActions, function(x) {
return {
signatures: x.signatures,
xpub: x.xpub,
};
});
};
API.prototype._addSignaturesToBitcoreTx = function(txp, t, signatures, xpub) {
if (signatures.length != txp.inputs.length)
throw new Error('Number of signatures does not match number of inputs');
$.checkState(txp.coin);
var bitcore = Bitcore_[txp.coin];
var i = 0,
x = new bitcore.HDPublicKey(xpub);
_.each(signatures, function(signatureHex) {
var input = txp.inputs[i];
try {
var signature = bitcore.crypto.Signature.fromString(signatureHex);
var pub = x.deriveChild(txp.inputPaths[i]).publicKey;
var s = {
inputIndex: i,
signature: signature,
sigtype: bitcore.crypto.Signature.SIGHASH_ALL | bitcore.crypto.Signature.SIGHASH_FORKID,
publicKey: pub,
}
;
t.inputs[i].addSignature(t, s);
i++;
} catch (e) {} ;
});
if (i != txp.inputs.length)
throw new Error('Wrong signatures');
};
API.prototype._applyAllSignatures = function(txp, t) {
var self = this;
$.checkState(txp.status == 'accepted');
var sigs = self._getCurrentSignatures(txp);
_.each(sigs, function(x) {
self._addSignaturesToBitcoreTx(txp, t, x.signatures, x.xpub);
});
};
/**
* Join
* @private
*
* @param {String} walletId
* @param {String} walletPrivKey
* @param {String} xPubKey
* @param {String} requestPubKey
* @param {String} copayerName
* @param {Object} Optional args
* @param {String} opts.customData
* @param {String} opts.coin
* @param {Callback} cb
*/
API.prototype._doJoinWallet = function(walletId, walletPrivKey, xPubKey, requestPubKey, copayerName, opts, cb) {
$.shouldBeFunction(cb);
var self = this;
opts = opts || {};
// Adds encrypted walletPrivateKey to CustomData
opts.customData = opts.customData || {};
opts.customData.walletPrivKey = walletPrivKey.toString();
var encCustomData = Utils.encryptMessage(JSON.stringify(opts.customData), this.credentials.personalEncryptingKey);
var encCopayerName = Utils.encryptMessage(copayerName, this.credentials.sharedEncryptingKey);
var args = {
walletId: walletId,
coin: opts.coin,
name: encCopayerName,
xPubKey: xPubKey,
requestPubKey: requestPubKey,
customData: encCustomData,
};
if (opts.dryRun) args.dryRun = true;
if (_.isBoolean(opts.supportBIP44AndP2PKH))
args.supportBIP44AndP2PKH = opts.supportBIP44AndP2PKH;
var hash = Utils.getCopayerHash(args.name, args.xPubKey, args.requestPubKey);
args.copayerSignature = Utils.signMessage(hash, walletPrivKey);
var url = '/v2/wallets/' + walletId + '/copayers';
this.request.post(url, args, function(err, body) {
if (err) return cb(err);
self._processWallet(body.wallet);
return cb(null, body.wallet);
});
};
/**
* Return if wallet is complete
*/
API.prototype.isComplete = function() {
return this.credentials && this.credentials.isComplete();
};
/**
* Is private key currently encrypted?
*
* @return {Boolean}
*/
API.prototype.isPrivKeyEncrypted = function() {
return this.credentials && this.credentials.isPrivKeyEncrypted();
};
/**
* Is private key external?
*
* @return {Boolean}
*/
API.prototype.isPrivKeyExternal = function() {
return this.credentials && this.credentials.hasExternalSource();
};
/**
* Get external wallet source name
*
* @return {String}
*/
API.prototype.getPrivKeyExternalSourceName = function() {
return this.credentials ? this.credentials.getExternalSourceName() : null;
};
/**
* Returns unencrypted extended private key and mnemonics
*
* @param password
*/
API.prototype.getKeys = function(password) {
return this.credentials.getKeys(password);
};
/**
 * Checks if the password is valid
 * Returns undefined (keys not encrypted), true or false.
*
* @param password
*/
API.prototype.checkPassword = function(password) {
if (!this.isPrivKeyEncrypted()) return;
try {
var keys = this.getKeys(password);
return !!keys.xPrivKey;
} catch (e) {
return false;
};
};
/**
 * Can these credentials sign a transaction?
 * (Only returns false on a 'proxy' setup for airgapped operation)
 *
 * @return {Boolean}
*/
API.prototype.canSign = function() {
return this.credentials && this.credentials.canSign();
};
API._extractPublicKeyRing = function(copayers) {
return _.map(copayers, function(copayer) {
var pkr = _.pick(copayer, ['xPubKey', 'requestPubKey']);
pkr.copayerName = copayer.name;
return pkr;
});
};
/**
* sets up encryption for the extended private key
*
* @param {String} password Password used to encrypt
* @param {Object} opts optional: SJCL options to encrypt (.iter, .salt, etc).
* @return {undefined}
*/
API.prototype.encryptPrivateKey = function(password, opts) {
this.credentials.encryptPrivateKey(password, opts || API.privateKeyEncryptionOpts);
};
/**
* disables encryption for private key.
*
* @param {String} password Password used to encrypt
*/
API.prototype.decryptPrivateKey = function(password) {
return this.credentials.decryptPrivateKey(password);
};
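// Illustrative sketch, not part of the library: a typical key-encryption
// lifecycle with the methods above. `client` is assumed to be an API instance
// that already holds credentials; 's3cret' is a placeholder password.
//
//   client.encryptPrivateKey('s3cret');   // store xPrivKey/mnemonic encrypted
//   client.isPrivKeyEncrypted();          // -> true
//   client.checkPassword('wrong');        // -> false
//   var keys = client.getKeys('s3cret');  // unencrypted keys, e.g. for signing
//   client.decryptPrivateKey('s3cret');   // back to unencrypted storage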
/**
* Get current fee levels for the specified network
*
* @param {string} coin - 'btc' (default) or 'bch'
* @param {string} network - 'livenet' (default) or 'testnet'
* @param {Callback} cb
 * @returns {Callback} cb - Returns error or the fee estimates for the given coin/network
*/
API.prototype.getFeeLevels = function(coin, network, cb) {
var self = this;
  $.checkArgument(!coin || _.includes(['btc', 'bch'], coin));
  $.checkArgument(!network || _.includes(['livenet', 'testnet'], network));
self.request.get('/v2/feelevels/?coin=' + (coin || 'btc') + '&network=' + (network || 'livenet'), function(err, result) {
if (err) return cb(err);
return cb(err, result);
});
};
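// Illustrative sketch, not part of the library: querying fee estimates for a
// network. `client` is an assumed API instance.
//
//   client.getFeeLevels('btc', 'livenet', function(err, levels) {
//     if (err) return console.error(err);
//     // `levels` holds the server's per-level fee estimates (normal, priority, ...)
//   });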
/**
* Get service version
*
* @param {Callback} cb
*/
API.prototype.getVersion = function(cb) {
this.request.get('/v1/version/', cb);
};
API.prototype._checkKeyDerivation = function() {
var isInvalid = (this.keyDerivationOk === false);
if (isInvalid) {
log.error('Key derivation for this device is not working as expected');
}
return !isInvalid;
};
/**
*
* Create a wallet.
* @param {String} walletName
* @param {String} copayerName
* @param {Number} m
* @param {Number} n
* @param {object} opts (optional: advanced options)
* @param {string} opts.coin[='btc'] - The coin for this wallet (btc, bch).
* @param {string} opts.network[='livenet']
* @param {string} opts.singleAddress[=false] - The wallet will only ever have one address.
* @param {String} opts.walletPrivKey - set a walletPrivKey (instead of random)
* @param {String} opts.id - set a id for wallet (instead of server given)
* @param cb
* @return {undefined}
*/
API.prototype.createWallet = function(walletName, copayerName, m, n, opts, cb) {
var self = this;
if (!self._checkKeyDerivation()) return cb(new Error('Cannot create new wallet'));
if (opts) $.shouldBeObject(opts);
opts = opts || {};
var coin = opts.coin || 'btc';
if (!_.includes(['btc', 'bch'], coin)) return cb(new Error('Invalid coin'));
var network = opts.network || 'livenet';
if (!_.includes(['testnet', 'livenet'], network)) return cb(new Error('Invalid network'));
if (!self.credentials) {
return cb(new Error('Generate keys first using seedFrom*'));
}
if (coin != self.credentials.coin) {
return cb(new Error('Existing keys were created for a different coin'));
}
if (network != self.credentials.network) {
return cb(new Error('Existing keys were created for a different network'));
}
var walletPrivKey = opts.walletPrivKey || new Bitcore.PrivateKey();
var c = self.credentials;
c.addWalletPrivateKey(walletPrivKey.toString());
var encWalletName = Utils.encryptMessage(walletName, c.sharedEncryptingKey);
var args = {
name: encWalletName,
m: m,
n: n,
pubKey: (new Bitcore.PrivateKey(walletPrivKey)).toPublicKey().toString(),
coin: coin,
network: network,
singleAddress: !!opts.singleAddress,
id: opts.id,
};
self.request.post('/v2/wallets/', args, function(err, res) {
if (err) return cb(err);
var walletId = res.walletId;
c.addWalletInfo(walletId, walletName, m, n, copayerName);
var secret = API._buildSecret(c.walletId, c.walletPrivKey, c.coin, c.network);
self._doJoinWallet(walletId, walletPrivKey, c.xPubKey, c.requestPubKey, copayerName, {
coin: coin
},
function(err, wallet) {
if (err) return cb(err);
return cb(null, n > 1 ? secret : null);
});
});
};
/**
* Join an existent wallet
*
* @param {String} secret
* @param {String} copayerName
* @param {Object} opts
* @param {string} opts.coin[='btc'] - The expected coin for this wallet (btc, bch).
* @param {Boolean} opts.dryRun[=false] - Simulate wallet join
* @param {Callback} cb
* @returns {Callback} cb - Returns the wallet
*/
API.prototype.joinWallet = function(secret, copayerName, opts, cb) {
var self = this;
if (!cb) {
cb = opts;
opts = {};
log.warn('DEPRECATED WARN: joinWallet should receive 4 parameters.');
}
if (!self._checkKeyDerivation()) return cb(new Error('Cannot join wallet'));
opts = opts || {};
var coin = opts.coin || 'btc';
if (!_.includes(['btc', 'bch'], coin)) return cb(new Error('Invalid coin'));
try {
var secretData = API.parseSecret(secret);
} catch (ex) {
return cb(ex);
}
if (!self.credentials) {
self.seedFromRandom({
coin: coin,
network: secretData.network
});
}
self.credentials.addWalletPrivateKey(secretData.walletPrivKey.toString());
self._doJoinWallet(secretData.walletId, secretData.walletPrivKey, self.credentials.xPubKey, self.credentials.requestPubKey, copayerName, {
coin: coin,
dryRun: !!opts.dryRun,
}, function(err, wallet) {
if (err) return cb(err);
if (!opts.dryRun) {
self.credentials.addWalletInfo(wallet.id, wallet.name, wallet.m, wallet.n, copayerName);
}
return cb(null, wallet);
});
};
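// Illustrative sketch, not part of the library: creating a shared 2-of-2 wallet
// and joining it from a second client. The base URL and names are placeholders.
//
//   var alice = new API({ baseUrl: 'https://bws.example.com/bws/api' });
//   alice.seedFromRandom({ coin: 'btc', network: 'livenet' });
//   alice.createWallet('Shared wallet', 'alice', 2, 2, { coin: 'btc', network: 'livenet' },
//     function(err, secret) {
//       // For multisig wallets (n > 1) `secret` is the invitation code to hand to copayers.
//       var bob = new API({ baseUrl: 'https://bws.example.com/bws/api' });
//       bob.joinWallet(secret, 'bob', { coin: 'btc' }, function(err, wallet) {
//         // bob is now a copayer; joinWallet seeds his keys via seedFromRandom if needed
//       });
//     });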
/**
* Recreates a wallet, given credentials (with wallet id)
*
* @returns {Callback} cb - Returns the wallet
*/
API.prototype.recreateWallet = function(cb) {
$.checkState(this.credentials);
$.checkState(this.credentials.isComplete());
$.checkState(this.credentials.walletPrivKey);
//$.checkState(this.credentials.hasWalletInfo());
var self = this;
// First: Try to get the wallet with current credentials
this.getStatus({
includeExtendedInfo: true
}, function(err) {
// No error? -> Wallet is ready.
if (!err) {
log.info('Wallet is already created');
return cb();
};
var c = self.credentials;
var walletPrivKey = Bitcore.PrivateKey.fromString(c.walletPrivKey);
var walletId = c.walletId;
var supportBIP44AndP2PKH = c.derivationStrategy != Constants.DERIVATION_STRATEGIES.BIP45;
var encWalletName = Utils.encryptMessage(c.walletName || 'recovered wallet', c.sharedEncryptingKey);
var coin = c.coin;
var args = {
name: encWalletName,
m: c.m,
n: c.n,
pubKey: walletPrivKey.toPublicKey().toString(),
coin: c.coin,
network: c.network,
id: walletId,
supportBIP44AndP2PKH: supportBIP44AndP2PKH,
};
self.request.post('/v2/wallets/', args, function(err, body) {
if (err) {
if (!(err instanceof Errors.WALLET_ALREADY_EXISTS))
return cb(err);
return self.addAccess({}, function(err) {
if (err) return cb(err);
self.openWallet(function(err) {
return cb(err);
});
});
}
if (!walletId) {
walletId = body.walletId;
}
var i = 1;
async.each(self.credentials.publicKeyRing, function(item, next) {
var name = item.copayerName || ('copayer ' + i++);
self._doJoinWallet(walletId, walletPrivKey, item.xPubKey, item.requestPubKey, name, {
coin: c.coin,
supportBIP44AndP2PKH: supportBIP44AndP2PKH,
}, function(err) {
          // Ignore the error if the copayer is already in the wallet
if (err && err instanceof Errors.COPAYER_IN_WALLET) return next();
return next(err);
});
}, cb);
});
});
};
API.prototype._processWallet = function(wallet) {
var self = this;
var encryptingKey = self.credentials.sharedEncryptingKey;
var name = Utils.decryptMessageNoThrow(wallet.name, encryptingKey);
if (name != wallet.name) {
wallet.encryptedName = wallet.name;
}
wallet.name = name;
_.each(wallet.copayers, function(copayer) {
var name = Utils.decryptMessageNoThrow(copayer.name, encryptingKey);
if (name != copayer.name) {
copayer.encryptedName = copayer.name;
}
copayer.name = name;
_.each(copayer.requestPubKeys, function(access) {
if (!access.name) return;
var name = Utils.decryptMessageNoThrow(access.name, encryptingKey);
if (name != access.name) {
access.encryptedName = access.name;
}
access.name = name;
});
});
};
API.prototype._processStatus = function(status) {
var self = this;
function processCustomData(data) {
var copayers = data.wallet.copayers;
if (!copayers) return;
var me = _.find(copayers, {
'id': self.credentials.copayerId
});
if (!me || !me.customData) return;
var customData;
try {
customData = JSON.parse(Utils.decryptMessage(me.customData, self.credentials.personalEncryptingKey));
} catch (e) {
log.warn('Could not decrypt customData:', me.customData);
}
if (!customData) return;
// Add it to result
data.customData = customData;
// Update walletPrivateKey
if (!self.credentials.walletPrivKey && customData.walletPrivKey)
self.credentials.addWalletPrivateKey(customData.walletPrivKey);
};
processCustomData(status);
self._processWallet(status.wallet);
self._processTxps(status.pendingTxps);
}
/**
* Get latest notifications
*
* @param {object} opts
* @param {String} opts.lastNotificationId (optional) - The ID of the last received notification
* @param {String} opts.timeSpan (optional) - A time window on which to look for notifications (in seconds)
* @param {String} opts.includeOwn[=false] (optional) - Do not ignore notifications generated by the current copayer
* @returns {Callback} cb - Returns error or an array of notifications
*/
API.prototype.getNotifications = function(opts, cb) {
$.checkState(this.credentials);
var self = this;
opts = opts || {};
var url = '/v1/notifications/';
if (opts.lastNotificationId) {
url += '?notificationId=' + opts.lastNotificationId;
} else if (opts.timeSpan) {
url += '?timeSpan=' + opts.timeSpan;
}
self.request.getWithLogin(url, function(err, result) {
if (err) return cb(err);
var notifications = _.filter(result, function(notification) {
return opts.includeOwn || (notification.creatorId != self.credentials.copayerId);
});
return cb(null, notifications);
});
};
/**
* Get status of the wallet
*
* @param {Boolean} opts.twoStep[=false] - Optional: use 2-step balance computation for improved performance
* @param {Boolean} opts.includeExtendedInfo (optional: query extended status)
* @returns {Callback} cb - Returns error or an object with status information
*/
API.prototype.getStatus = function(opts, cb) {
$.checkState(this.credentials);
if (!cb) {
cb = opts;
opts = {};
log.warn('DEPRECATED WARN: getStatus should receive 2 parameters.')
}
var self = this;
opts = opts || {};
var qs = [];
qs.push('includeExtendedInfo=' + (opts.includeExtendedInfo ? '1' : '0'));
qs.push('twoStep=' + (opts.twoStep ? '1' : '0'));
qs.push('serverMessageArray=1');
self.request.get('/v3/wallets/?' + qs.join('&'), function(err, result) {
if (err) return cb(err);
if (result.wallet.status == 'pending') {
var c = self.credentials;
result.wallet.secret = API._buildSecret(c.walletId, c.walletPrivKey, c.coin, c.network);
}
self._processStatus(result);
return cb(err, result);
});
};
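// Illustrative sketch, not part of the library: polling wallet status. `client`
// is an assumed API instance with complete credentials.
//
//   client.getStatus({ includeExtendedInfo: false }, function(err, status) {
//     if (err) return console.error(err);
//     // status.wallet and status.pendingTxps (among other fields) are available here
//   });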
/**
* Get copayer preferences
*
* @param {Callback} cb
* @return {Callback} cb - Return error or object
*/
API.prototype.getPreferences = function(cb) {
$.checkState(this.credentials);
$.checkArgument(cb);
var self = this;
self.request.get('/v1/preferences/', function(err, preferences) {
if (err) return cb(err);
return cb(null, preferences);
});
};
/**
* Save copayer preferences
*
* @param {Object} preferences
* @param {Callback} cb
* @return {Callback} cb - Return error or object
*/
API.prototype.savePreferences = function(preferences, cb) {
$.checkState(this.credentials);
$.checkArgument(cb);
var self = this;
self.request.put('/v1/preferences/', preferences, cb);
};
/**
* fetchPayPro
*
* @param opts.payProUrl URL for paypro request
* @returns {Callback} cb - Return error or the parsed payment protocol request
* Returns (err,paypro)
* paypro.amount
* paypro.toAddress
* paypro.memo
*/
API.prototype.fetchPayPro = function(opts, cb) {
$.checkArgument(opts)
.checkArgument(opts.payProUrl);
PayPro.get({
url: opts.payProUrl,
coin: this.credentials.coin || 'btc',
network: this.credentials.network || 'livenet',
// for testing
request: this.request,
}, function(err, paypro) {
if (err)
return cb(err);
return cb(null, paypro);
});
};
/**
* Gets list of utxos
*
* @param {Function} cb
* @param {Object} opts
* @param {Array} opts.addresses (optional) - List of addresses from where to fetch UTXOs.
* @returns {Callback} cb - Return error or the list of utxos
*/
API.prototype.getUtxos = function(opts, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
opts = opts || {};
var url = '/v1/utxos/';
if (opts.addresses) {
url += '?' + querystring.stringify({
addresses: [].concat(opts.addresses).join(',')
});
}
this.request.get(url, cb);
};
API.prototype._getCreateTxProposalArgs = function(opts) {
var self = this;
var args = _.cloneDeep(opts);
args.message = API._encryptMessage(opts.message, this.credentials.sharedEncryptingKey) || null;
args.payProUrl = opts.payProUrl || null;
_.each(args.outputs, function(o) {
o.message = API._encryptMessage(o.message, self.credentials.sharedEncryptingKey) || null;
});
return args;
};
/**
* Create a transaction proposal
*
* @param {Object} opts
* @param {string} opts.txProposalId - Optional. If provided it will be used as this TX proposal ID. Should be unique in the scope of the wallet.
* @param {Array} opts.outputs - List of outputs.
* @param {string} opts.outputs[].toAddress - Destination address.
* @param {number} opts.outputs[].amount - Amount to transfer in satoshi.
* @param {string} opts.outputs[].message - A message to attach to this output.
* @param {string} opts.message - A message to attach to this transaction.
 * @param {string} opts.feeLevel[='normal'] - Optional. Specify the fee level for this TX ('priority', 'normal', 'economy', 'superEconomy').
* @param {number} opts.feePerKb - Optional. Specify the fee per KB for this TX (in satoshi).
* @param {string} opts.changeAddress - Optional. Use this address as the change address for the tx. The address should belong to the wallet. In the case of singleAddress wallets, the first main address will be used.
* @param {Boolean} opts.sendMax - Optional. Send maximum amount of funds that make sense under the specified fee/feePerKb conditions. (defaults to false).
* @param {string} opts.payProUrl - Optional. Paypro URL for peers to verify TX
* @param {Boolean} opts.excludeUnconfirmedUtxos[=false] - Optional. Do not use UTXOs of unconfirmed transactions as inputs
* @param {Boolean} opts.validateOutputs[=true] - Optional. Perform validation on outputs.
* @param {Boolean} opts.dryRun[=false] - Optional. Simulate the action but do not change server state.
* @param {Array} opts.inputs - Optional. Inputs for this TX
 * @param {number} opts.fee - Optional. Use a fixed fee for this TX (only when opts.inputs is specified)
* @param {Boolean} opts.noShuffleOutputs - Optional. If set, TX outputs won't be shuffled. Defaults to false
* @returns {Callback} cb - Return error or the transaction proposal
*/
API.prototype.createTxProposal = function(opts, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
$.checkState(this.credentials.sharedEncryptingKey);
$.checkArgument(opts);
var self = this;
var args = self._getCreateTxProposalArgs(opts);
self.request.post('/v3/txproposals/', args, function(err, txp) {
if (err) return cb(err);
self._processTxps(txp);
if (!Verifier.checkProposalCreation(args, txp, self.credentials.sharedEncryptingKey)) {
return cb(new Errors.SERVER_COMPROMISED);
}
return cb(null, txp);
});
};
/**
* Publish a transaction proposal
*
* @param {Object} opts
* @param {Object} opts.txp - The transaction proposal object returned by the API#createTxProposal method
* @returns {Callback} cb - Return error or null
*/
API.prototype.publishTxProposal = function(opts, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
$.checkArgument(opts)
.checkArgument(opts.txp);
$.checkState(parseInt(opts.txp.version) >= 3);
var self = this;
var t = Utils.buildTx(opts.txp);
var hash = t.uncheckedSerialize();
var args = {
proposalSignature: Utils.signMessage(hash, self.credentials.requestPrivKey)
};
var url = '/v2/txproposals/' + opts.txp.id + '/publish/';
self.request.post(url, args, function(err, txp) {
if (err) return cb(err);
self._processTxps(txp);
return cb(null, txp);
});
};
/**
* Create a new address
*
* @param {Object} opts
* @param {Boolean} opts.ignoreMaxGap[=false]
* @param {Callback} cb
* @returns {Callback} cb - Return error or the address
*/
API.prototype.createAddress = function(opts, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
var self = this;
if (!cb) {
cb = opts;
opts = {};
log.warn('DEPRECATED WARN: createAddress should receive 2 parameters.')
}
if (!self._checkKeyDerivation()) return cb(new Error('Cannot create new address for this wallet'));
opts = opts || {};
self.request.post('/v4/addresses/', opts, function(err, address) {
if (err) return cb(err);
if (!Verifier.checkAddress(self.credentials, address)) {
return cb(new Errors.SERVER_COMPROMISED);
}
return cb(null, address);
});
};
/**
* Get your main addresses
*
* @param {Object} opts
* @param {Boolean} opts.doNotVerify
* @param {Numeric} opts.limit (optional) - Limit the resultset. Return all addresses by default.
* @param {Boolean} [opts.reverse=false] (optional) - Reverse the order of returned addresses.
* @param {Callback} cb
* @returns {Callback} cb - Return error or the array of addresses
*/
API.prototype.getMainAddresses = function(opts, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
var self = this;
opts = opts || {};
var args = [];
if (opts.limit) args.push('limit=' + opts.limit);
if (opts.reverse) args.push('reverse=1');
var qs = '';
if (args.length > 0) {
qs = '?' + args.join('&');
}
var url = '/v1/addresses/' + qs;
self.request.get(url, function(err, addresses) {
if (err) return cb(err);
if (!opts.doNotVerify) {
var fake = _.some(addresses, function(address) {
return !Verifier.checkAddress(self.credentials, address);
});
if (fake)
return cb(new Errors.SERVER_COMPROMISED);
}
return cb(null, addresses);
});
};
/**
* Update wallet balance
*
* @param {String} opts.coin - Optional: defaults to current wallet coin
* @param {Callback} cb
*/
API.prototype.getBalance = function(opts, cb) {
if (!cb) {
cb = opts;
opts = {};
log.warn('DEPRECATED WARN: getBalance should receive 2 parameters.')
}
var self = this;
opts = opts || {};
$.checkState(this.credentials && this.credentials.isComplete());
var args = [];
if (opts.coin) {
if (!_.includes(['btc', 'bch'], opts.coin)) return cb(new Error('Invalid coin'));
args.push('coin=' + opts.coin);
}
var qs = '';
if (args.length > 0) {
qs = '?' + args.join('&');
}
var url = '/v1/balance/' + qs;
this.request.get(url, cb);
};
/**
* Get list of transactions proposals
*
* @param {Object} opts
* @param {Boolean} opts.doNotVerify
* @param {Boolean} opts.forAirGapped
* @param {Boolean} opts.doNotEncryptPkr
* @return {Callback} cb - Return error or array of transactions proposals
*/
API.prototype.getTxProposals = function(opts, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
var self = this;
self.request.get('/v2/txproposals/', function(err, txps) {
if (err) return cb(err);
self._processTxps(txps);
async.every(txps,
function(txp, acb) {
if (opts.doNotVerify) return acb(true);
self.getPayPro(txp, function(err, paypro) {
var isLegit = Verifier.checkTxProposal(self.credentials, txp, {
paypro: paypro,
});
return acb(isLegit);
});
},
function(isLegit) {
if (!isLegit)
return cb(new Errors.SERVER_COMPROMISED);
var result;
if (opts.forAirGapped) {
result = {
txps: JSON.parse(JSON.stringify(txps)),
encryptedPkr: opts.doNotEncryptPkr ? null : Utils.encryptMessage(JSON.stringify(self.credentials.publicKeyRing), self.credentials.personalEncryptingKey),
unencryptedPkr: opts.doNotEncryptPkr ? JSON.stringify(self.credentials.publicKeyRing) : null,
m: self.credentials.m,
n: self.credentials.n,
};
} else {
result = txps;
}
return cb(null, result);
});
});
};
//private?
API.prototype.getPayPro = function(txp, cb) {
var self = this;
if (!txp.payProUrl || this.doNotVerifyPayPro)
return cb();
PayPro.get({
url: txp.payProUrl,
coin: txp.coin || 'btc',
network: txp.network || 'livenet',
// for testing
request: self.request,
}, function(err, paypro) {
    if (err) return cb(new Error('Could not fetch invoice: ' + (err.message ? err.message : err)));
return cb(null, paypro);
});
};
/**
* Sign a transaction proposal
*
* @param {Object} txp
* @param {String} password - (optional) A password to decrypt the encrypted private key (if encryption is set).
* @param {Callback} cb
* @return {Callback} cb - Return error or object
*/
API.prototype.signTxProposal = function(txp, password, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
$.checkArgument(txp.creatorId);
if (_.isFunction(password)) {
cb = password;
password = null;
}
var self = this;
if (!txp.signatures) {
if (!self.canSign())
return cb(new Errors.MISSING_PRIVATE_KEY);
if (self.isPrivKeyEncrypted() && !password)
return cb(new Errors.ENCRYPTED_PRIVATE_KEY);
}
self.getPayPro(txp, function(err, paypro) {
if (err) return cb(err);
var isLegit = Verifier.checkTxProposal(self.credentials, txp, {
paypro: paypro,
});
if (!isLegit)
return cb(new Errors.SERVER_COMPROMISED);
var signatures = txp.signatures;
if (_.isEmpty(signatures)) {
try {
signatures = self._signTxp(txp, password);
} catch (ex) {
log.error('Error signing tx', ex);
return cb(ex);
}
}
var url = '/v1/txproposals/' + txp.id + '/signatures/';
var args = {
signatures: signatures
};
self.request.post(url, args, function(err, txp) {
if (err) return cb(err);
self._processTxps(txp);
return cb(null, txp);
});
});
};
/**
* Sign transaction proposal from AirGapped
*
* @param {Object} txp
* @param {String} encryptedPkr
* @param {Number} m
* @param {Number} n
* @param {String} password - (optional) A password to decrypt the encrypted private key (if encryption is set).
* @return {Object} txp - Return transaction
*/
API.prototype.signTxProposalFromAirGapped = function(txp, encryptedPkr, m, n, password) {
$.checkState(this.credentials);
var self = this;
if (!self.canSign())
throw new Errors.MISSING_PRIVATE_KEY;
if (self.isPrivKeyEncrypted() && !password)
throw new Errors.ENCRYPTED_PRIVATE_KEY;
var publicKeyRing;
try {
publicKeyRing = JSON.parse(Utils.decryptMessage(encryptedPkr, self.credentials.personalEncryptingKey));
} catch (ex) {
throw new Error('Could not decrypt public key ring');
}
if (!_.isArray(publicKeyRing) || publicKeyRing.length != n) {
throw new Error('Invalid public key ring');
}
self.credentials.m = m;
self.credentials.n = n;
self.credentials.addressType = txp.addressType;
self.credentials.addPublicKeyRing(publicKeyRing);
if (!Verifier.checkTxProposalSignature(self.credentials, txp))
throw new Error('Fake transaction proposal');
return self._signTxp(txp, password);
};
/**
* Sign transaction proposal from AirGapped
*
* @param {String} key - A mnemonic phrase or an xprv HD private key
* @param {Object} txp
* @param {String} unencryptedPkr
* @param {Number} m
* @param {Number} n
* @param {Object} opts
* @param {String} opts.coin (default 'btc')
* @param {String} opts.passphrase
* @param {Number} opts.account - default 0
* @param {String} opts.derivationStrategy - default 'BIP44'
* @return {Object} txp - Return transaction
*/
API.signTxProposalFromAirGapped = function(key, txp, unencryptedPkr, m, n, opts) {
var self = this;
opts = opts || {}
var coin = opts.coin || 'btc';
  if (!_.includes(['btc', 'bch'], coin)) throw new Error('Invalid coin');
var publicKeyRing = JSON.parse(unencryptedPkr);
if (!_.isArray(publicKeyRing) || publicKeyRing.length != n) {
throw new Error('Invalid public key ring');
}
var newClient = new API({
baseUrl: 'https://bws.example.com/bws/api'
});
if (key.slice(0, 4) === 'xprv' || key.slice(0, 4) === 'tprv') {
if (key.slice(0, 4) === 'xprv' && txp.network == 'testnet') throw new Error("testnet HD keys must start with tprv");
if (key.slice(0, 4) === 'tprv' && txp.network == 'livenet') throw new Error("livenet HD keys must start with xprv");
newClient.seedFromExtendedPrivateKey(key, {
'coin': coin,
'account': opts.account,
'derivationStrategy': opts.derivationStrategy
});
} else {
newClient.seedFromMnemonic(key, {
'coin': coin,
'network': txp.network,
'passphrase': opts.passphrase,
'account': opts.account,
'derivationStrategy': opts.derivationStrategy
})
}
newClient.credentials.m = m;
newClient.credentials.n = n;
newClient.credentials.addressType = txp.addressType;
newClient.credentials.addPublicKeyRing(publicKeyRing);
if (!Verifier.checkTxProposalSignature(newClient.credentials, txp))
throw new Error('Fake transaction proposal');
return newClient._signTxp(txp);
};
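// Illustrative sketch, not part of the library: the air-gapped signing flow the
// two helpers above enable. `onlineClient` (watch-only) and the offline key are
// assumptions; the private key never touches the networked machine.
//
//   // online machine: export proposals plus the public key ring
//   onlineClient.getTxProposals({ forAirGapped: true, doNotEncryptPkr: true }, function(err, bundle) {
//     // hand bundle.txps[0], bundle.unencryptedPkr, bundle.m, bundle.n to the offline device
//   });
//
//   // offline machine: produce signatures from a mnemonic or xprv
//   var signatures = API.signTxProposalFromAirGapped(mnemonicOrXprv, txp,
//     bundle.unencryptedPkr, bundle.m, bundle.n, { coin: 'btc' });
//
//   // back online: attach the signatures and push them to the server
//   txp.signatures = signatures;
//   onlineClient.signTxProposal(txp, function(err, signedTxp) { /* ... */ });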
/**
* Reject a transaction proposal
*
* @param {Object} txp
* @param {String} reason
* @param {Callback} cb
* @return {Callback} cb - Return error or object
*/
API.prototype.rejectTxProposal = function(txp, reason, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
$.checkArgument(cb);
var self = this;
var url = '/v1/txproposals/' + txp.id + '/rejections/';
var args = {
reason: API._encryptMessage(reason, self.credentials.sharedEncryptingKey) || '',
};
self.request.post(url, args, function(err, txp) {
if (err) return cb(err);
self._processTxps(txp);
return cb(null, txp);
});
};
/**
* Broadcast raw transaction
*
* @param {Object} opts
* @param {String} opts.network
* @param {String} opts.rawTx
* @param {Callback} cb
* @return {Callback} cb - Return error or txid
*/
API.prototype.broadcastRawTx = function(opts, cb) {
$.checkState(this.credentials);
$.checkArgument(cb);
var self = this;
opts = opts || {};
var url = '/v1/broadcast_raw/';
self.request.post(url, opts, function(err, txid) {
if (err) return cb(err);
return cb(null, txid);
});
};
API.prototype._doBroadcast = function(txp, cb) {
var self = this;
var url = '/v1/txproposals/' + txp.id + '/broadcast/';
self.request.post(url, {}, function(err, txp) {
if (err) return cb(err);
self._processTxps(txp);
return cb(null, txp);
});
};
/**
* Broadcast a transaction proposal
*
* @param {Object} txp
* @param {Callback} cb
* @return {Callback} cb - Return error or object
*/
API.prototype.broadcastTxProposal = function(txp, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
var self = this;
self.getPayPro(txp, function(err, paypro) {
if (err) return cb(err);
if (paypro) {
var t_unsigned = Utils.buildTx(txp);
var t = Utils.buildTx(txp);
self._applyAllSignatures(txp, t);
PayPro.send({
url: txp.payProUrl,
amountSat: txp.amount,
rawTxUnsigned: t_unsigned.uncheckedSerialize(),
rawTx: t.serialize({
disableSmallFees: true,
disableLargeFees: true,
disableDustOutputs: true
}),
coin: txp.coin || 'btc',
network: txp.network || 'livenet',
// for testing
request: self.request,
}, function(err, ack, memo) {
if (err) {
return cb(err);
}
if (memo) {
log.debug('Merchant memo:', memo);
}
self._doBroadcast(txp, function(err2, txp) {
return cb(err2, txp, memo, err);
});
});
} else {
self._doBroadcast(txp, cb);
}
});
};
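// Illustrative sketch, not part of the library: the usual lifecycle of a spend
// with the methods above. `client` is an assumed API instance with a complete
// wallet; the address, amount and password are placeholders.
//
//   client.createTxProposal({
//     outputs: [{ toAddress: 'mo5f...', amount: 100000 }], // amount in satoshis
//     message: 'rent',
//     feeLevel: 'normal',
//   }, function(err, txp) {
//     client.publishTxProposal({ txp: txp }, function(err, publishedTxp) {
//       // password is only needed if the private key is encrypted
//       client.signTxProposal(publishedTxp, password, function(err, signedTxp) {
//         client.broadcastTxProposal(signedTxp, function(err, broadcastedTxp, memo) {
//           // the proposal is now broadcast; `memo` is only set for payment-protocol sends
//         });
//       });
//     });
//   });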
/**
* Remove a transaction proposal
*
* @param {Object} txp
* @param {Callback} cb
* @return {Callback} cb - Return error or empty
*/
API.prototype.removeTxProposal = function(txp, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
var self = this;
var url = '/v1/txproposals/' + txp.id;
self.request.delete(url, function(err) {
return cb(err);
});
};
/**
* Get transaction history
*
* @param {Object} opts
* @param {Number} opts.skip (defaults to 0)
* @param {Number} opts.limit
* @param {Boolean} opts.includeExtendedInfo
* @param {Callback} cb
* @return {Callback} cb - Return error or array of transactions
*/
API.prototype.getTxHistory = function(opts, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
var self = this;
var args = [];
if (opts) {
if (opts.skip) args.push('skip=' + opts.skip);
if (opts.limit) args.push('limit=' + opts.limit);
if (opts.includeExtendedInfo) args.push('includeExtendedInfo=1');
}
var qs = '';
if (args.length > 0) {
qs = '?' + args.join('&');
}
var url = '/v1/txhistory/' + qs;
self.request.get(url, function(err, txs) {
if (err) return cb(err);
self._processTxps(txs);
return cb(null, txs);
});
};
/**
* getTx
*
 * @param {String} id - The transaction proposal id
* @return {Callback} cb - Return error or transaction
*/
API.prototype.getTx = function(id, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
var self = this;
var url = '/v1/txproposals/' + id;
this.request.get(url, function(err, txp) {
if (err) return cb(err);
self._processTxps(txp);
return cb(null, txp);
});
};
/**
* Start an address scanning process.
* When finished, the scanning process will send a notification 'ScanFinished' to all copayers.
*
* @param {Object} opts
* @param {Boolean} opts.includeCopayerBranches (defaults to false)
* @param {Callback} cb
*/
API.prototype.startScan = function(opts, cb) {
$.checkState(this.credentials && this.credentials.isComplete());
var self = this;
var args = {
includeCopayerBranches: opts.includeCopayerBranches,
};
self.request.post('/v1/addresses/scan', args, function(err) {
return cb(err);
});
};
/**
* Adds access to the current copayer
* @param {Object} opts
* @param {bool} opts.generateNewKey Optional: generate a new key for the new access
* @param {string} opts.restrictions
* - cannotProposeTXs
* - cannotXXX TODO
* @param {string} opts.name (name for the new access)
*
 * Returns the wallet with the new access and the requestPrivateKey
*/
API.prototype.addAccess = function(opts, cb) {
$.checkState(this.credentials && this.credentials.canSign());
opts = opts || {};
var reqPrivKey = new Bitcore.PrivateKey(opts.generateNewKey ? null : this.credentials.requestPrivKey);
var requestPubKey = reqPrivKey.toPublicKey().toString();
var xPriv = new Bitcore.HDPrivateKey(this.credentials.xPrivKey)
.deriveChild(this.credentials.getBaseAddressDerivationPath());
var sig = Utils.signRequestPubKey(requestPubKey, xPriv);
var copayerId = this.credentials.copayerId;
var encCopayerName = opts.name ? Utils.encryptMessage(opts.name, this.credentials.sharedEncryptingKey) : null;
var opts = {
copayerId: copayerId,
requestPubKey: requestPubKey,
signature: sig,
name: encCopayerName,
restrictions: opts.restrictions,
};
this.request.put('/v1/copayers/' + copayerId + '/', opts, function(err, res) {
if (err) return cb(err);
return cb(null, res.wallet, reqPrivKey);
});
};
/**
* Get a note associated with the specified txid
* @param {Object} opts
* @param {string} opts.txid - The txid to associate this note with
*/
API.prototype.getTxNote = function(opts, cb) {
$.checkState(this.credentials);
var self = this;
opts = opts || {};
self.request.get('/v1/txnotes/' + opts.txid + '/', function(err, note) {
if (err) return cb(err);
self._processTxNotes(note);
return cb(null, note);
});
};
/**
* Edit a note associated with the specified txid
* @param {Object} opts
* @param {string} opts.txid - The txid to associate this note with
* @param {string} opts.body - The contents of the note
*/
API.prototype.editTxNote = function(opts, cb) {
$.checkState(this.credentials);
var self = this;
opts = opts || {};
if (opts.body) {
opts.body = API._encryptMessage(opts.body, this.credentials.sharedEncryptingKey);
}
self.request.put('/v1/txnotes/' + opts.txid + '/', opts, function(err, note) {
if (err) return cb(err);
self._processTxNotes(note);
return cb(null, note);
});
};
/**
* Get all notes edited after the specified date
* @param {Object} opts
* @param {string} opts.minTs - The starting timestamp
*/
API.prototype.getTxNotes = function(opts, cb) {
$.checkState(this.credentials);
var self = this;
opts = opts || {};
var args = [];
if (_.isNumber(opts.minTs)) {
args.push('minTs=' + opts.minTs);
}
var qs = '';
if (args.length > 0) {
qs = '?' + args.join('&');
}
self.request.get('/v1/txnotes/' + qs, function(err, notes) {
if (err) return cb(err);
self._processTxNotes(notes);
return cb(null, notes);
});
};
/**
* Returns exchange rate for the specified currency & timestamp.
* @param {Object} opts
* @param {string} opts.code - Currency ISO code.
* @param {Date} [opts.ts] - A timestamp to base the rate on (default Date.now()).
* @param {String} [opts.provider] - A provider of exchange rates (default 'BitPay').
* @returns {Object} rates - The exchange rate.
*/
API.prototype.getFiatRate = function(opts, cb) {
$.checkArgument(cb);
var self = this;
var opts = opts || {};
var args = [];
if (opts.ts) args.push('ts=' + opts.ts);
if (opts.provider) args.push('provider=' + opts.provider);
var qs = '';
if (args.length > 0) {
qs = '?' + args.join('&');
}
self.request.get('/v1/fiatrates/' + opts.code + '/' + qs, function(err, rates) {
if (err) return cb(err);
return cb(null, rates);
});
}
/**
* Subscribe to push notifications.
* @param {Object} opts
* @param {String} opts.type - Device type (ios or android).
* @param {String} opts.token - Device token.
* @returns {Object} response - Status of subscription.
*/
API.prototype.pushNotificationsSubscribe = function(opts, cb) {
var url = '/v1/pushnotifications/subscriptions/';
this.request.post(url, opts, function(err, response) {
if (err) return cb(err);
return cb(null, response);
});
};
/**
* Unsubscribe from push notifications.
* @param {String} token - Device token
* @return {Callback} cb - Return error if exists
*/
API.prototype.pushNotificationsUnsubscribe = function(token, cb) {
var url = '/v2/pushnotifications/subscriptions/' + token;
this.request.delete(url, cb);
};
/**
* Listen to a tx for its first confirmation.
* @param {Object} opts
* @param {String} opts.txid - The txid to subscribe to.
* @returns {Object} response - Status of subscription.
*/
API.prototype.txConfirmationSubscribe = function(opts, cb) {
var url = '/v1/txconfirmations/';
this.request.post(url, opts, function(err, response) {
if (err) return cb(err);
return cb(null, response);
});
};
/**
* Stop listening for a tx confirmation.
* @param {String} txid - The txid to unsubscribe from.
* @return {Callback} cb - Return error if exists
*/
API.prototype.txConfirmationUnsubscribe = function(txid, cb) {
var url = '/v1/txconfirmations/' + txid;
this.request.delete(url, cb);
};
/**
* Returns send max information.
 * @param {Object} opts
 * @param {string} opts.feeLevel[='normal'] - Optional. Specify the fee level ('priority', 'normal', 'economy', 'superEconomy').
 * @param {number} opts.feePerKb - Optional. Specify the fee per KB (in satoshi).
 * @param {Boolean} opts.excludeUnconfirmedUtxos - Indicates if it should use (or not) the unconfirmed utxos
 * @param {Boolean} opts.returnInputs - Indicates if it should return (or not) the inputs
* @return {Callback} cb - Return error (if exists) and object result
*/
API.prototype.getSendMaxInfo = function(opts, cb) {
var self = this;
var args = [];
opts = opts || {};
if (opts.feeLevel) args.push('feeLevel=' + opts.feeLevel);
if (opts.feePerKb != null) args.push('feePerKb=' + opts.feePerKb);
if (opts.excludeUnconfirmedUtxos) args.push('excludeUnconfirmedUtxos=1');
if (opts.returnInputs) args.push('returnInputs=1');
var qs = '';
if (args.length > 0)
qs = '?' + args.join('&');
var url = '/v1/sendmaxinfo/' + qs;
self.request.get(url, function(err, result) {
if (err) return cb(err);
return cb(null, result);
});
};
/**
* Get wallet status based on a string identifier (one of: walletId, address, txid)
*
* @param {string} opts.identifier - The identifier
* @param {Boolean} opts.includeExtendedInfo (optional: query extended status)
* @param {Boolean} opts.walletCheck (optional: run v8 walletCheck if wallet found)
* @returns {Callback} cb - Returns error or an object with status information
*/
API.prototype.getStatusByIdentifier = function(opts, cb) {
$.checkState(this.credentials);
var self = this;
opts = opts || {};
var qs = [];
qs.push('includeExtendedInfo=' + (opts.includeExtendedInfo ? '1' : '0'));
qs.push('walletCheck=' + (opts.walletCheck ? '1' : '0'));
self.request.get('/v1/wallets/' + opts.identifier + '?' + qs.join('&'), function(err, result) {
if (err || !result || !result.wallet) return cb(err);
if (result.wallet.status == 'pending') {
var c = self.credentials;
result.wallet.secret = API._buildSecret(c.walletId, c.walletPrivKey, c.coin, c.network);
}
self._processStatus(result);
return cb(err, result);
});
};
/*
*
* Compatibility Functions
*
*/
API.prototype._oldCopayDecrypt = function(username, password, blob) {
var SEP1 = '@#$';
var SEP2 = '%^#@';
var decrypted;
try {
var passphrase = username + SEP1 + password;
decrypted = sjcl.decrypt(passphrase, blob);
} catch (e) {
passphrase = username + SEP2 + password;
try {
decrypted = sjcl.decrypt(passphrase, blob);
} catch (e) {
log.debug(e);
};
}
if (!decrypted)
return null;
var ret;
try {
ret = JSON.parse(decrypted);
} catch (e) {};
return ret;
};
API.prototype.getWalletIdsFromOldCopay = function(username, password, blob) {
var p = this._oldCopayDecrypt(username, password, blob);
if (!p) return null;
var ids = p.walletIds.concat(_.keys(p.focusedTimestamps));
return _.uniq(ids);
};
API.PayPro = PayPro;
module.exports = API;
| 1 | 15,218 | you can use `toString(true)` that will work for BTC and remove the prefix for BCH. | bitpay-bitcore | js |
@@ -652,9 +652,10 @@ class TabbedBrowser(tabwidget.TabWidget):
log.modes.debug("Current tab changed, focusing {!r}".format(tab))
tab.setFocus()
- for mode in [usertypes.KeyMode.hint, usertypes.KeyMode.insert,
- usertypes.KeyMode.caret, usertypes.KeyMode.passthrough]:
- modeman.leave(self._win_id, mode, 'tab changed', maybe=True)
+ if not config.val.tabs.persist_mode_on_change:
+ for mode in [usertypes.KeyMode.hint, usertypes.KeyMode.insert,
+ usertypes.KeyMode.caret, usertypes.KeyMode.passthrough]:
+ modeman.leave(self._win_id, mode, 'tab changed', maybe=True)
if self._now_focused is not None:
objreg.register('last-focused-tab', self._now_focused, update=True,
scope='window', window=self._win_id) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The main tabbed browser widget."""
import functools
import attr
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QTimer, QUrl
from PyQt5.QtGui import QIcon
from qutebrowser.config import config
from qutebrowser.keyinput import modeman
from qutebrowser.mainwindow import tabwidget, mainwindow
from qutebrowser.browser import signalfilter, browsertab
from qutebrowser.utils import (log, usertypes, utils, qtutils, objreg,
urlutils, message, jinja)
@attr.s
class UndoEntry:
"""Information needed for :undo."""
url = attr.ib()
history = attr.ib()
index = attr.ib()
pinned = attr.ib()
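# Usage note (see _remove_tab/undo below): TabbedBrowser._undo_stack is a list
# of *groups* of UndoEntry objects. Closing a single tab pushes a new
# one-element group; closing several tabs in one action (new_undo=False)
# appends to the current group, so a single :undo re-opens them all together,
# in reverse order of closing.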
class TabDeletedError(Exception):
"""Exception raised when _tab_index is called for a deleted tab."""
class TabbedBrowser(tabwidget.TabWidget):
"""A TabWidget with QWebViews inside.
Provides methods to manage tabs, convenience methods to interact with the
current tab (cur_*) and filters signals to re-emit them when they occurred
in the currently visible tab.
For all tab-specific signals (cur_*) emitted by a tab, this happens:
- the signal gets filtered with _filter_signals and self.cur_* gets
emitted if the signal occurred in the current tab.
Attributes:
search_text/search_options: Search parameters which are shared between
all tabs.
_win_id: The window ID this tabbedbrowser is associated with.
_filter: A SignalFilter instance.
_now_focused: The tab which is focused now.
_tab_insert_idx_left: Where to insert a new tab with
tabs.new_tab_position set to 'prev'.
_tab_insert_idx_right: Same as above, for 'next'.
_undo_stack: List of lists of UndoEntry objects of closed tabs.
shutting_down: Whether we're currently shutting down.
_local_marks: Jump markers local to each page
_global_marks: Jump markers used across all pages
default_window_icon: The qutebrowser window icon
private: Whether private browsing is on for this window.
Signals:
cur_progress: Progress of the current tab changed (load_progress).
cur_load_started: Current tab started loading (load_started)
cur_load_finished: Current tab finished loading (load_finished)
cur_url_changed: Current URL changed.
cur_link_hovered: Link hovered in current tab (link_hovered)
cur_scroll_perc_changed: Scroll percentage of current tab changed.
arg 1: x-position in %.
arg 2: y-position in %.
cur_load_status_changed: Loading status of current tab changed.
close_window: The last tab was closed, close this window.
resized: Emitted when the browser window has resized, so the completion
widget can adjust its size to it.
arg: The new size.
current_tab_changed: The current tab changed to the emitted tab.
new_tab: Emits the new WebView and its index when a new tab is opened.
"""
cur_progress = pyqtSignal(int)
cur_load_started = pyqtSignal()
cur_load_finished = pyqtSignal(bool)
cur_url_changed = pyqtSignal(QUrl)
cur_link_hovered = pyqtSignal(str)
cur_scroll_perc_changed = pyqtSignal(int, int)
cur_load_status_changed = pyqtSignal(str)
cur_fullscreen_requested = pyqtSignal(bool)
close_window = pyqtSignal()
resized = pyqtSignal('QRect')
current_tab_changed = pyqtSignal(browsertab.AbstractTab)
new_tab = pyqtSignal(browsertab.AbstractTab, int)
def __init__(self, *, win_id, private, parent=None):
super().__init__(win_id, parent)
self._win_id = win_id
self._tab_insert_idx_left = 0
self._tab_insert_idx_right = -1
self.shutting_down = False
self.tabCloseRequested.connect(self.on_tab_close_requested)
self.new_tab_requested.connect(self.tabopen)
self.currentChanged.connect(self.on_current_changed)
self.cur_load_started.connect(self.on_cur_load_started)
self.cur_fullscreen_requested.connect(self.tabBar().maybe_hide)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self._undo_stack = []
self._filter = signalfilter.SignalFilter(win_id, self)
self._now_focused = None
self.search_text = None
self.search_options = {}
self._local_marks = {}
self._global_marks = {}
self.default_window_icon = self.window().windowIcon()
self.private = private
config.instance.changed.connect(self._on_config_changed)
def __repr__(self):
return utils.get_repr(self, count=self.count())
@pyqtSlot(str)
def _on_config_changed(self, option):
if option == 'tabs.favicons.show':
self._update_favicons()
elif option == 'window.title_format':
self._update_window_title()
elif option in ['tabs.title.format', 'tabs.title.format_pinned']:
self._update_tab_titles()
def _tab_index(self, tab):
"""Get the index of a given tab.
Raises TabDeletedError if the tab doesn't exist anymore.
"""
try:
idx = self.indexOf(tab)
except RuntimeError as e:
log.webview.debug("Got invalid tab ({})!".format(e))
raise TabDeletedError(e)
if idx == -1:
log.webview.debug("Got invalid tab (index is -1)!")
raise TabDeletedError("index is -1!")
return idx
def widgets(self):
"""Get a list of open tab widgets.
We don't implement this as generator so we can delete tabs while
iterating over the list.
"""
widgets = []
for i in range(self.count()):
widget = self.widget(i)
if widget is None:
log.webview.debug("Got None-widget in tabbedbrowser!")
else:
widgets.append(widget)
return widgets
def _update_window_title(self, field=None):
"""Change the window title to match the current tab.
Args:
field: A field name which was updated. If given, the title
is only set if the given field is in the template.
"""
title_format = config.val.window.title_format
if field is not None and ('{' + field + '}') not in title_format:
return
idx = self.currentIndex()
if idx == -1:
# (e.g. last tab removed)
log.webview.debug("Not updating window title because index is -1")
return
fields = self.get_tab_fields(idx)
fields['id'] = self._win_id
title = title_format.format(**fields)
self.window().setWindowTitle(title)
def _connect_tab_signals(self, tab):
"""Set up the needed signals for tab."""
# filtered signals
tab.link_hovered.connect(
self._filter.create(self.cur_link_hovered, tab))
tab.load_progress.connect(
self._filter.create(self.cur_progress, tab))
tab.load_finished.connect(
self._filter.create(self.cur_load_finished, tab))
tab.load_started.connect(
self._filter.create(self.cur_load_started, tab))
tab.scroller.perc_changed.connect(
self._filter.create(self.cur_scroll_perc_changed, tab))
tab.url_changed.connect(
self._filter.create(self.cur_url_changed, tab))
tab.load_status_changed.connect(
self._filter.create(self.cur_load_status_changed, tab))
tab.fullscreen_requested.connect(
self._filter.create(self.cur_fullscreen_requested, tab))
# misc
tab.scroller.perc_changed.connect(self.on_scroll_pos_changed)
tab.url_changed.connect(
functools.partial(self.on_url_changed, tab))
tab.title_changed.connect(
functools.partial(self.on_title_changed, tab))
tab.icon_changed.connect(
functools.partial(self.on_icon_changed, tab))
tab.load_progress.connect(
functools.partial(self.on_load_progress, tab))
tab.load_finished.connect(
functools.partial(self.on_load_finished, tab))
tab.load_started.connect(
functools.partial(self.on_load_started, tab))
tab.window_close_requested.connect(
functools.partial(self.on_window_close_requested, tab))
tab.renderer_process_terminated.connect(
functools.partial(self._on_renderer_process_terminated, tab))
tab.new_tab_requested.connect(self.tabopen)
if not self.private:
web_history = objreg.get('web-history')
tab.add_history_item.connect(web_history.add_from_tab)
def current_url(self):
"""Get the URL of the current tab.
Intended to be used from command handlers.
Return:
The current URL as QUrl.
"""
idx = self.currentIndex()
return super().tab_url(idx)
def shutdown(self):
"""Try to shut down all tabs cleanly."""
self.shutting_down = True
for tab in self.widgets():
self._remove_tab(tab)
def tab_close_prompt_if_pinned(self, tab, force, yes_action):
"""Helper method for tab_close.
If tab is pinned, prompt. If not, run yes_action.
If tab is destroyed, abort question.
"""
if tab.data.pinned and not force:
message.confirm_async(
title='Pinned Tab',
text="Are you sure you want to close a pinned tab?",
yes_action=yes_action, default=False, abort_on=[tab.destroyed])
else:
yes_action()
def close_tab(self, tab, *, add_undo=True, new_undo=True):
"""Close a tab.
Args:
tab: The QWebView to be closed.
add_undo: Whether the tab close can be undone.
new_undo: Whether the undo entry should be a new item in the stack.
"""
last_close = config.val.tabs.last_close
count = self.count()
if last_close == 'ignore' and count == 1:
return
self._remove_tab(tab, add_undo=add_undo, new_undo=new_undo)
if count == 1: # We just closed the last tab above.
if last_close == 'close':
self.close_window.emit()
elif last_close == 'blank':
self.openurl(QUrl('about:blank'), newtab=True)
elif last_close == 'startpage':
for url in config.val.url.start_pages:
self.openurl(url, newtab=True)
elif last_close == 'default-page':
self.openurl(config.val.url.default_page, newtab=True)
def _remove_tab(self, tab, *, add_undo=True, new_undo=True, crashed=False):
"""Remove a tab from the tab list and delete it properly.
Args:
tab: The QWebView to be closed.
add_undo: Whether the tab close can be undone.
new_undo: Whether the undo entry should be a new item in the stack.
crashed: Whether we're closing a tab with crashed renderer process.
"""
idx = self.indexOf(tab)
if idx == -1:
if crashed:
return
raise TabDeletedError("tab {} is not contained in "
"TabbedWidget!".format(tab))
if tab is self._now_focused:
self._now_focused = None
if tab is objreg.get('last-focused-tab', None, scope='window',
window=self._win_id):
objreg.delete('last-focused-tab', scope='window',
window=self._win_id)
if tab.url().isEmpty():
# There are some good reasons why a URL could be empty
# (target="_blank" with a download, see [1]), so we silently ignore
# this.
# [1] https://github.com/qutebrowser/qutebrowser/issues/163
pass
elif not tab.url().isValid():
# We display a warning for URLs which are not empty but invalid -
# but we don't return here because we want the tab to close either
# way.
urlutils.invalid_url_error(tab.url(), "saving tab")
elif add_undo:
try:
history_data = tab.history.serialize()
except browsertab.WebTabError:
pass # special URL
else:
entry = UndoEntry(tab.url(), history_data, idx,
tab.data.pinned)
if new_undo or not self._undo_stack:
self._undo_stack.append([entry])
else:
self._undo_stack[-1].append(entry)
tab.shutdown()
self.removeTab(idx)
if not crashed:
# WORKAROUND for a segfault when we delete the crashed tab.
# see https://bugreports.qt.io/browse/QTBUG-58698
tab.layout().unwrap()
tab.deleteLater()
def undo(self):
"""Undo removing of a tab or tabs."""
# Remove unused tab which may be created after the last tab is closed
last_close = config.val.tabs.last_close
use_current_tab = False
if last_close in ['blank', 'startpage', 'default-page']:
only_one_tab_open = self.count() == 1
no_history = len(self.widget(0).history) == 1
urls = {
'blank': QUrl('about:blank'),
'startpage': config.val.url.start_pages[0],
'default-page': config.val.url.default_page,
}
first_tab_url = self.widget(0).url()
last_close_urlstr = urls[last_close].toString().rstrip('/')
first_tab_urlstr = first_tab_url.toString().rstrip('/')
last_close_url_used = first_tab_urlstr == last_close_urlstr
use_current_tab = (only_one_tab_open and no_history and
last_close_url_used)
for entry in reversed(self._undo_stack.pop()):
if use_current_tab:
self.openurl(entry.url, newtab=False)
newtab = self.widget(0)
use_current_tab = False
else:
newtab = self.tabopen(entry.url, background=False,
idx=entry.index)
newtab.history.deserialize(entry.history)
self.set_tab_pinned(newtab, entry.pinned)
@pyqtSlot('QUrl', bool)
def openurl(self, url, newtab):
"""Open a URL, used as a slot.
Args:
url: The URL to open as QUrl.
newtab: True to open URL in a new tab, False otherwise.
"""
qtutils.ensure_valid(url)
if newtab or self.currentWidget() is None:
self.tabopen(url, background=False)
else:
self.currentWidget().openurl(url)
@pyqtSlot(int)
def on_tab_close_requested(self, idx):
"""Close a tab via an index."""
tab = self.widget(idx)
if tab is None:
log.webview.debug("Got invalid tab {} for index {}!".format(
tab, idx))
return
self.tab_close_prompt_if_pinned(
tab, False, lambda: self.close_tab(tab))
@pyqtSlot(browsertab.AbstractTab)
def on_window_close_requested(self, widget):
"""Close a tab with a widget given."""
try:
self.close_tab(widget)
except TabDeletedError:
log.webview.debug("Requested to close {!r} which does not "
"exist!".format(widget))
@pyqtSlot('QUrl')
@pyqtSlot('QUrl', bool)
@pyqtSlot('QUrl', bool, bool)
def tabopen(self, url=None, background=None, related=True, idx=None, *,
ignore_tabs_are_windows=False):
"""Open a new tab with a given URL.
Inner logic for open-tab and open-tab-bg.
Also connect all the signals we need to _filter_signals.
Args:
url: The URL to open as QUrl or None for an empty tab.
background: Whether to open the tab in the background.
                        If None, the `tabs.background` setting decides.
related: Whether the tab was opened from another existing tab.
If this is set, the new position might be different. With
the default settings we handle it like Chromium does:
- Tabs from clicked links etc. are to the right of
the current (related=True).
- Explicitly opened tabs are at the very right
(related=False)
idx: The index where the new tab should be opened.
ignore_tabs_are_windows: If given, never open a new window, even
with tabs.tabs_are_windows set.
Return:
The opened WebView instance.
"""
if url is not None:
qtutils.ensure_valid(url)
log.webview.debug("Creating new tab with URL {}, background {}, "
"related {}, idx {}".format(
url, background, related, idx))
if (config.val.tabs.tabs_are_windows and self.count() > 0 and
not ignore_tabs_are_windows):
window = mainwindow.MainWindow(private=self.private)
window.show()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=window.win_id)
return tabbed_browser.tabopen(url=url, background=background,
related=related)
tab = browsertab.create(win_id=self._win_id, private=self.private,
parent=self)
self._connect_tab_signals(tab)
if idx is None:
idx = self._get_new_tab_idx(related)
self.insertTab(idx, tab, "")
if url is not None:
tab.openurl(url)
if background is None:
background = config.val.tabs.background
if background:
# Make sure the background tab has the correct initial size.
# With a foreground tab, it's going to be resized correctly by the
# layout anyways.
tab.resize(self.currentWidget().size())
self.tab_index_changed.emit(self.currentIndex(), self.count())
else:
self.setCurrentWidget(tab)
tab.show()
self.new_tab.emit(tab, idx)
return tab
def _get_new_tab_idx(self, related):
"""Get the index of a tab to insert.
Args:
related: Whether the tab was opened from another tab (as a "child")
Return:
The index of the new tab.
"""
if related:
pos = config.val.tabs.new_position.related
else:
pos = config.val.tabs.new_position.unrelated
if pos == 'prev':
idx = self._tab_insert_idx_left
# On first sight, we'd think we have to decrement
# self._tab_insert_idx_left here, as we want the next tab to be
# *before* the one we just opened. However, since we opened a tab
# *before* the currently focused tab, indices will shift by
# 1 automatically.
elif pos == 'next':
idx = self._tab_insert_idx_right
self._tab_insert_idx_right += 1
elif pos == 'first':
idx = 0
elif pos == 'last':
idx = -1
else:
raise ValueError("Invalid tabs.new_position '{}'.".format(pos))
log.webview.debug("tabs.new_position {} -> opening new tab at {}, "
"next left: {} / right: {}".format(
pos, idx, self._tab_insert_idx_left,
self._tab_insert_idx_right))
return idx
def _update_favicons(self):
"""Update favicons when config was changed."""
for i, tab in enumerate(self.widgets()):
if config.val.tabs.favicons.show:
self.setTabIcon(i, tab.icon())
if config.val.tabs.tabs_are_windows:
self.window().setWindowIcon(tab.icon())
else:
self.setTabIcon(i, QIcon())
if config.val.tabs.tabs_are_windows:
self.window().setWindowIcon(self.default_window_icon)
@pyqtSlot()
def on_load_started(self, tab):
"""Clear icon and update title when a tab started loading.
Args:
tab: The tab where the signal belongs to.
"""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self._update_tab_title(idx)
if tab.data.keep_icon:
tab.data.keep_icon = False
else:
self.setTabIcon(idx, QIcon())
if (config.val.tabs.tabs_are_windows and
config.val.tabs.favicons.show):
self.window().setWindowIcon(self.default_window_icon)
if idx == self.currentIndex():
self._update_window_title()
@pyqtSlot()
def on_cur_load_started(self):
"""Leave insert/hint mode when loading started."""
modeman.leave(self._win_id, usertypes.KeyMode.insert, 'load started',
maybe=True)
modeman.leave(self._win_id, usertypes.KeyMode.hint, 'load started',
maybe=True)
@pyqtSlot(browsertab.AbstractTab, str)
def on_title_changed(self, tab, text):
"""Set the title of a tab.
Slot for the title_changed signal of any tab.
Args:
tab: The WebView where the title was changed.
text: The text to set.
"""
if not text:
log.webview.debug("Ignoring title change to '{}'.".format(text))
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
log.webview.debug("Changing title for idx {} to '{}'".format(
idx, text))
self.set_page_title(idx, text)
if idx == self.currentIndex():
self._update_window_title()
@pyqtSlot(browsertab.AbstractTab, QUrl)
def on_url_changed(self, tab, url):
"""Set the new URL as title if there's no title yet.
Args:
            tab: The WebView where the URL was changed.
url: The new URL.
"""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if not self.page_title(idx):
self.set_page_title(idx, url.toDisplayString())
@pyqtSlot(browsertab.AbstractTab, QIcon)
def on_icon_changed(self, tab, icon):
"""Set the icon of a tab.
Slot for the iconChanged signal of any tab.
Args:
            tab: The WebView where the icon was changed.
            icon: The new icon.
"""
if not config.val.tabs.favicons.show:
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.setTabIcon(idx, icon)
if config.val.tabs.tabs_are_windows:
self.window().setWindowIcon(icon)
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Give focus to current tab if command mode was left."""
if mode in [usertypes.KeyMode.command, usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno]:
widget = self.currentWidget()
log.modes.debug("Left status-input mode, focusing {!r}".format(
widget))
if widget is None:
return
widget.setFocus()
@pyqtSlot(int)
def on_current_changed(self, idx):
"""Set last-focused-tab and leave hinting mode when focus changed."""
if idx == -1 or self.shutting_down:
# closing the last tab (before quitting) or shutting down
return
tab = self.widget(idx)
if tab is None:
log.webview.debug("on_current_changed got called with invalid "
"index {}".format(idx))
return
log.modes.debug("Current tab changed, focusing {!r}".format(tab))
tab.setFocus()
for mode in [usertypes.KeyMode.hint, usertypes.KeyMode.insert,
usertypes.KeyMode.caret, usertypes.KeyMode.passthrough]:
modeman.leave(self._win_id, mode, 'tab changed', maybe=True)
if self._now_focused is not None:
objreg.register('last-focused-tab', self._now_focused, update=True,
scope='window', window=self._win_id)
self._now_focused = tab
self.current_tab_changed.emit(tab)
QTimer.singleShot(0, self._update_window_title)
self._tab_insert_idx_left = self.currentIndex()
self._tab_insert_idx_right = self.currentIndex() + 1
@pyqtSlot()
def on_cmd_return_pressed(self):
"""Set focus when the commandline closes."""
log.modes.debug("Commandline closed, focusing {!r}".format(self))
def on_load_progress(self, tab, perc):
"""Adjust tab indicator on load progress."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
start = config.val.colors.tabs.indicator.start
stop = config.val.colors.tabs.indicator.stop
system = config.val.colors.tabs.indicator.system
color = utils.interpolate_color(start, stop, perc, system)
self.set_tab_indicator_color(idx, color)
self._update_tab_title(idx)
if idx == self.currentIndex():
self._update_window_title()
def on_load_finished(self, tab, ok):
"""Adjust tab indicator when loading finished."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if ok:
start = config.val.colors.tabs.indicator.start
stop = config.val.colors.tabs.indicator.stop
system = config.val.colors.tabs.indicator.system
color = utils.interpolate_color(start, stop, 100, system)
else:
color = config.val.colors.tabs.indicator.error
self.set_tab_indicator_color(idx, color)
self._update_tab_title(idx)
if idx == self.currentIndex():
self._update_window_title()
tab.handle_auto_insert_mode(ok)
@pyqtSlot()
def on_scroll_pos_changed(self):
"""Update tab and window title when scroll position changed."""
idx = self.currentIndex()
if idx == -1:
# (e.g. last tab removed)
log.webview.debug("Not updating scroll position because index is "
"-1")
return
self._update_window_title('scroll_pos')
self._update_tab_title(idx, 'scroll_pos')
def _on_renderer_process_terminated(self, tab, status, code):
"""Show an error when a renderer process terminated."""
if status == browsertab.TerminationStatus.normal:
return
messages = {
browsertab.TerminationStatus.abnormal:
"Renderer process exited with status {}".format(code),
browsertab.TerminationStatus.crashed:
"Renderer process crashed",
browsertab.TerminationStatus.killed:
"Renderer process was killed",
browsertab.TerminationStatus.unknown:
"Renderer process did not start",
}
msg = messages[status]
def show_error_page(html):
tab.set_html(html)
log.webview.error(msg)
if qtutils.version_check('5.9', compiled=False):
url_string = tab.url(requested=True).toDisplayString()
error_page = jinja.render(
'error.html', title="Error loading {}".format(url_string),
url=url_string, error=msg)
QTimer.singleShot(100, lambda: show_error_page(error_page))
else:
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58698
message.error(msg)
self._remove_tab(tab, crashed=True)
if self.count() == 0:
self.tabopen(QUrl('about:blank'))
def resizeEvent(self, e):
"""Extend resizeEvent of QWidget to emit a resized signal afterwards.
Args:
e: The QResizeEvent
"""
super().resizeEvent(e)
self.resized.emit(self.geometry())
def wheelEvent(self, e):
"""Override wheelEvent of QWidget to forward it to the focused tab.
Args:
e: The QWheelEvent
"""
if self._now_focused is not None:
self._now_focused.wheelEvent(e)
else:
e.ignore()
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
# strip the fragment as it may interfere with scrolling
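        # (e.g. example.com/page#top and example.com/page end up with the same
        # key here -- the URL is illustrative only)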
try:
url = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
# show an error only if the mark is not automatically set
if key != "'":
message.error("Failed to set mark: url invalid")
return
point = self.currentWidget().scroller.pos_px()
if key.isupper():
self._global_marks[key] = point, url
else:
if url not in self._local_marks:
self._local_marks[url] = {}
self._local_marks[url][key] = point
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
try:
# consider urls that differ only in fragment to be identical
urlkey = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
urlkey = None
tab = self.currentWidget()
if key.isupper():
if key in self._global_marks:
point, url = self._global_marks[key]
def callback(ok):
if ok:
self.cur_load_finished.disconnect(callback)
tab.scroller.to_point(point)
self.openurl(url, newtab=False)
self.cur_load_finished.connect(callback)
else:
message.error("Mark {} is not set".format(key))
elif urlkey is None:
message.error("Current URL is invalid!")
elif urlkey in self._local_marks and key in self._local_marks[urlkey]:
point = self._local_marks[urlkey][key]
# save the pre-jump position in the special ' mark
# this has to happen after we read the mark, otherwise jump_mark
# "'" would just jump to the current position every time
self.set_mark("'")
tab.scroller.to_point(point)
else:
message.error("Mark {} is not set".format(key))
| 1 | 19,772 | This line is too long now - please break it after the comma. | qutebrowser-qutebrowser | py |
@@ -67,6 +67,7 @@ type StreamerDisconnecter interface {
type Stream interface {
io.ReadWriter
io.Closer
+ ResponseHeaders() Headers
Headers() Headers
FullClose() error
Reset() error | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package p2p provides the peer-to-peer abstractions used
// across different protocols in Bee.
package p2p
import (
"context"
"io"
"time"
"github.com/ethersphere/bee/pkg/bzz"
"github.com/ethersphere/bee/pkg/swarm"
ma "github.com/multiformats/go-multiaddr"
)
// Service provides methods to handle p2p Peers and Protocols.
type Service interface {
AddProtocol(ProtocolSpec) error
// Connect to a peer but do not notify topology about the established connection.
Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error)
Disconnecter
Peers() []Peer
BlocklistedPeers() ([]Peer, error)
Addresses() ([]ma.Multiaddr, error)
SetPickyNotifier(PickyNotifier)
}
type Disconnecter interface {
Disconnect(overlay swarm.Address) error
	// Blocklist will disconnect a peer and put it on a blocklist (blocking in & out connections) for the provided duration.
	// A duration of 0 is treated as an infinite duration.
Blocklist(overlay swarm.Address, duration time.Duration) error
}
// PickyNotifier can decide whether a peer should be picked
type PickyNotifier interface {
Pick(Peer) bool
Notifier
}
type Notifier interface {
Connected(context.Context, Peer) error
Disconnected(Peer)
}
// DebugService extends the Service with methods used for debugging.
type DebugService interface {
Service
SetWelcomeMessage(val string) error
GetWelcomeMessage() string
}
// Streamer is able to create a new Stream.
type Streamer interface {
NewStream(ctx context.Context, address swarm.Address, h Headers, protocol, version, stream string) (Stream, error)
}
type StreamerDisconnecter interface {
Streamer
Disconnecter
}
// Stream represent a bidirectional data Stream.
type Stream interface {
io.ReadWriter
io.Closer
Headers() Headers
FullClose() error
Reset() error
}
// ProtocolSpec defines a collection of Stream specifications with handlers.
type ProtocolSpec struct {
Name string
Version string
StreamSpecs []StreamSpec
ConnectIn func(context.Context, Peer) error
ConnectOut func(context.Context, Peer) error
DisconnectIn func(Peer) error
DisconnectOut func(Peer) error
}
// StreamSpec defines the handling of a Stream within the protocol.
type StreamSpec struct {
Name string
Handler HandlerFunc
Headler HeadlerFunc
}
// Peer holds information about a Peer.
type Peer struct {
Address swarm.Address `json:"address"`
}
// HandlerFunc handles a received Stream from a Peer.
type HandlerFunc func(context.Context, Peer, Stream) error
// HandlerMiddleware decorates a HandlerFunc by returning a new one.
type HandlerMiddleware func(HandlerFunc) HandlerFunc
// HeadlerFunc returns response headers based on the received request
// headers.
type HeadlerFunc func(Headers) Headers
// Headers represents a collection of p2p header key value pairs.
type Headers map[string][]byte
// Common header names.
const (
HeaderNameTracingSpanContext = "tracing-span-context"
)
// NewSwarmStreamName constructs a libp2p compatible stream name out of
// protocol name and version and stream name.
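// For example (argument values are illustrative only):
// NewSwarmStreamName("pingpong", "1.0.0", "pingpong") returns "/swarm/pingpong/1.0.0/pingpong".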
func NewSwarmStreamName(protocol, version, stream string) string {
return "/swarm/" + protocol + "/" + version + "/" + stream
}
| 1 | 13,526 | I find this addition to the interface a bit contentious. I think it would be cleaner to just return the response headers together with the new stream on `NewStream`. I.e. change the method signature for `NewStream` to be: `NewStream(ctx context.Context, overlay swarm.Address, headers p2p.Headers, protocolName, protocolVersion, streamName string) (p2p.Stream, p2p.Headers, error)`. I'd like to hear what other think. cc @zelig @janos | ethersphere-bee | go |
@@ -222,6 +222,12 @@ class CartFacade
if ($cart !== null) {
$this->cartWatcherFacade->checkCartModifications($cart);
+
+ if ($cart->isEmpty()) {
+ $this->deleteCart($cart);
+
+ return null;
+ }
}
return $cart; | 1 | <?php
namespace Shopsys\FrameworkBundle\Model\Cart;
use Doctrine\ORM\EntityManagerInterface;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Model\Cart\Item\CartItemFactoryInterface;
use Shopsys\FrameworkBundle\Model\Cart\Watcher\CartWatcherFacade;
use Shopsys\FrameworkBundle\Model\Customer\CurrentCustomer;
use Shopsys\FrameworkBundle\Model\Customer\CustomerIdentifier;
use Shopsys\FrameworkBundle\Model\Customer\CustomerIdentifierFactory;
use Shopsys\FrameworkBundle\Model\Order\PromoCode\CurrentPromoCodeFacade;
use Shopsys\FrameworkBundle\Model\Product\Pricing\ProductPriceCalculationForUser;
use Shopsys\FrameworkBundle\Model\Product\ProductRepository;
class CartFacade
{
const DAYS_LIMIT_FOR_UNREGISTERED = 60;
const DAYS_LIMIT_FOR_REGISTERED = 120;
/**
* @var \Doctrine\ORM\EntityManagerInterface
*/
protected $em;
/**
* @var \Shopsys\FrameworkBundle\Model\Cart\CartFactory
*/
protected $cartFactory;
/**
* @var \Shopsys\FrameworkBundle\Model\Product\ProductRepository
*/
protected $productRepository;
/**
* @var \Shopsys\FrameworkBundle\Model\Customer\CustomerIdentifierFactory
*/
protected $customerIdentifierFactory;
/**
* @var \Shopsys\FrameworkBundle\Component\Domain\Domain
*/
protected $domain;
/**
* @var \Shopsys\FrameworkBundle\Model\Customer\CurrentCustomer
*/
protected $currentCustomer;
/**
* @var \Shopsys\FrameworkBundle\Model\Order\PromoCode\CurrentPromoCodeFacade
*/
protected $currentPromoCodeFacade;
/**
* @var \Shopsys\FrameworkBundle\Model\Product\Pricing\ProductPriceCalculationForUser
*/
protected $productPriceCalculation;
/**
* @var \Shopsys\FrameworkBundle\Model\Cart\Item\CartItemFactoryInterface
*/
protected $cartItemFactory;
/**
* @var \Shopsys\FrameworkBundle\Model\Cart\CartRepository
*/
protected $cartRepository;
/**
* @var \Shopsys\FrameworkBundle\Model\Cart\Watcher\CartWatcherFacade
*/
protected $cartWatcherFacade;
/**
* @param \Doctrine\ORM\EntityManagerInterface $em
* @param \Shopsys\FrameworkBundle\Model\Cart\CartFactory $cartFactory
* @param \Shopsys\FrameworkBundle\Model\Product\ProductRepository $productRepository
* @param \Shopsys\FrameworkBundle\Model\Customer\CustomerIdentifierFactory $customerIdentifierFactory
* @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain
* @param \Shopsys\FrameworkBundle\Model\Customer\CurrentCustomer $currentCustomer
* @param \Shopsys\FrameworkBundle\Model\Order\PromoCode\CurrentPromoCodeFacade $currentPromoCodeFacade
* @param \Shopsys\FrameworkBundle\Model\Product\Pricing\ProductPriceCalculationForUser $productPriceCalculation
* @param \Shopsys\FrameworkBundle\Model\Cart\Item\CartItemFactoryInterface $cartItemFactory
* @param \Shopsys\FrameworkBundle\Model\Cart\CartRepository $cartRepository
* @param \Shopsys\FrameworkBundle\Model\Cart\Watcher\CartWatcherFacade $cartWatcherFacade
*/
public function __construct(
EntityManagerInterface $em,
CartFactory $cartFactory,
ProductRepository $productRepository,
CustomerIdentifierFactory $customerIdentifierFactory,
Domain $domain,
CurrentCustomer $currentCustomer,
CurrentPromoCodeFacade $currentPromoCodeFacade,
ProductPriceCalculationForUser $productPriceCalculation,
CartItemFactoryInterface $cartItemFactory,
CartRepository $cartRepository,
CartWatcherFacade $cartWatcherFacade
) {
$this->em = $em;
$this->cartFactory = $cartFactory;
$this->productRepository = $productRepository;
$this->customerIdentifierFactory = $customerIdentifierFactory;
$this->domain = $domain;
$this->currentCustomer = $currentCustomer;
$this->currentPromoCodeFacade = $currentPromoCodeFacade;
$this->productPriceCalculation = $productPriceCalculation;
$this->cartItemFactory = $cartItemFactory;
$this->cartRepository = $cartRepository;
$this->cartWatcherFacade = $cartWatcherFacade;
}
/**
* @param int $productId
* @param int $quantity
* @return \Shopsys\FrameworkBundle\Model\Cart\AddProductResult
*/
public function addProductToCart($productId, $quantity)
{
$product = $this->productRepository->getSellableById(
$productId,
$this->domain->getId(),
$this->currentCustomer->getPricingGroup()
);
$cart = $this->getCartOfCurrentCustomerCreateIfNotExists();
/* @var $result \Shopsys\FrameworkBundle\Model\Cart\AddProductResult */
$result = $cart->addProduct($product, $quantity, $this->productPriceCalculation, $this->cartItemFactory);
$this->em->persist($result->getCartItem());
$this->em->flush();
return $result;
}
/**
* @param array $quantitiesByCartItemId
*/
public function changeQuantities(array $quantitiesByCartItemId)
{
$cart = $this->findCartOfCurrentCustomer();
if ($cart === null) {
throw new \Shopsys\FrameworkBundle\Model\Cart\Exception\CartIsEmptyException();
}
$cart->changeQuantities($quantitiesByCartItemId);
$this->em->flush();
}
/**
* @param int $cartItemId
*/
public function deleteCartItem($cartItemId)
{
$cart = $this->findCartOfCurrentCustomer();
if ($cart === null) {
throw new \Shopsys\FrameworkBundle\Model\Cart\Exception\CartIsEmptyException();
}
$cartItemToDelete = $cart->getItemById($cartItemId);
$cart->removeItemById($cartItemId);
$this->em->remove($cartItemToDelete);
$this->em->flush();
if ($cart->isEmpty()) {
$this->deleteCart($cart);
}
}
public function deleteCartOfCurrentCustomer()
{
$cart = $this->findCartOfCurrentCustomer();
if ($cart !== null) {
$this->deleteCart($cart);
}
}
/**
* @param \Shopsys\FrameworkBundle\Model\Cart\Cart $cart
*/
public function deleteCart(Cart $cart)
{
$this->em->remove($cart);
$this->em->flush();
$this->cleanAdditionalData();
}
/**
* @param int $cartItemId
* @return \Shopsys\FrameworkBundle\Model\Product\Product
*/
public function getProductByCartItemId($cartItemId)
{
$cart = $this->findCartOfCurrentCustomer();
if ($cart === null) {
$message = 'CartItem with id = ' . $cartItemId . ' not found in cart.';
throw new \Shopsys\FrameworkBundle\Model\Cart\Exception\InvalidCartItemException($message);
}
return $cart->getItemById($cartItemId)->getProduct();
}
public function cleanAdditionalData()
{
$this->currentPromoCodeFacade->removeEnteredPromoCode();
}
/**
* @param \Shopsys\FrameworkBundle\Model\Customer\CustomerIdentifier $customerIdentifier
* @return \Shopsys\FrameworkBundle\Model\Cart\Cart|null
*/
public function findCartByCustomerIdentifier(CustomerIdentifier $customerIdentifier)
{
$cart = $this->cartRepository->findByCustomerIdentifier($customerIdentifier);
if ($cart !== null) {
$this->cartWatcherFacade->checkCartModifications($cart);
}
return $cart;
}
/**
* @return \Shopsys\FrameworkBundle\Model\Cart\Cart|null
*/
public function findCartOfCurrentCustomer()
{
$customerIdentifier = $this->customerIdentifierFactory->get();
return $this->findCartByCustomerIdentifier($customerIdentifier);
}
/**
* @return \Shopsys\FrameworkBundle\Model\Cart\Cart
*/
public function getCartOfCurrentCustomerCreateIfNotExists()
{
$customerIdentifier = $this->customerIdentifierFactory->get();
return $this->getCartByCustomerIdentifierCreateIfNotExists($customerIdentifier);
}
/**
* @param \Shopsys\FrameworkBundle\Model\Customer\CustomerIdentifier $customerIdentifier
* @return \Shopsys\FrameworkBundle\Model\Cart\Cart
*/
public function getCartByCustomerIdentifierCreateIfNotExists(CustomerIdentifier $customerIdentifier)
{
$cart = $this->cartRepository->findByCustomerIdentifier($customerIdentifier);
if ($cart === null) {
$cart = $this->cartFactory->create($customerIdentifier);
$this->em->persist($cart);
$this->em->flush($cart);
}
return $cart;
}
/**
* @return \Shopsys\FrameworkBundle\Model\Order\Item\QuantifiedProduct[]
*/
public function getQuantifiedProductsOfCurrentCustomerIndexedByCartItemId()
{
$cart = $this->findCartOfCurrentCustomer();
if ($cart === null) {
return [];
}
return $cart->getQuantifiedProductsIndexedByItemId();
}
public function deleteOldCarts()
{
$this->cartRepository->deleteOldCartsForUnregisteredCustomers(self::DAYS_LIMIT_FOR_UNREGISTERED);
$this->cartRepository->deleteOldCartsForRegisteredCustomers(self::DAYS_LIMIT_FOR_REGISTERED);
}
}
| 1 | 14,775 | i hope there is some magic where isEmpty or some cron can strip all non listable products from cart, but since we have the situation tested it should be OK and also we'll see during tests | shopsys-shopsys | php |
@@ -127,7 +127,7 @@ public class HiveIcebergFilterFactory {
case FLOAT:
return leaf.getLiteral();
case DATE:
- return daysFromTimestamp((Timestamp) leaf.getLiteral());
+ return daysFromDate((Date) leaf.getLiteral());
case TIMESTAMP:
return microsFromTimestamp((Timestamp) LITERAL_FIELD.get(leaf));
case DECIMAL: | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.mr.hive;
import java.math.BigDecimal;
import java.sql.Date;
import java.sql.Timestamp;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hive.ql.io.sarg.ExpressionTree;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.iceberg.common.DynFields;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.util.DateTimeUtil;
import static org.apache.iceberg.expressions.Expressions.and;
import static org.apache.iceberg.expressions.Expressions.equal;
import static org.apache.iceberg.expressions.Expressions.greaterThanOrEqual;
import static org.apache.iceberg.expressions.Expressions.in;
import static org.apache.iceberg.expressions.Expressions.isNull;
import static org.apache.iceberg.expressions.Expressions.lessThan;
import static org.apache.iceberg.expressions.Expressions.lessThanOrEqual;
import static org.apache.iceberg.expressions.Expressions.not;
import static org.apache.iceberg.expressions.Expressions.or;
public class HiveIcebergFilterFactory {
private HiveIcebergFilterFactory() {
}
public static Expression generateFilterExpression(SearchArgument sarg) {
return translate(sarg.getExpression(), sarg.getLeaves());
}
/**
* Recursive method to traverse down the ExpressionTree to evaluate each expression and its leaf nodes.
* @param tree Current ExpressionTree where the 'top' node is being evaluated.
* @param leaves List of all leaf nodes within the tree.
* @return Expression that is translated from the Hive SearchArgument.
*/
private static Expression translate(ExpressionTree tree, List<PredicateLeaf> leaves) {
List<ExpressionTree> childNodes = tree.getChildren();
switch (tree.getOperator()) {
case OR:
Expression orResult = Expressions.alwaysFalse();
for (ExpressionTree child : childNodes) {
orResult = or(orResult, translate(child, leaves));
}
return orResult;
case AND:
Expression result = Expressions.alwaysTrue();
for (ExpressionTree child : childNodes) {
result = and(result, translate(child, leaves));
}
return result;
case NOT:
return not(translate(childNodes.get(0), leaves));
case LEAF:
return translateLeaf(leaves.get(tree.getLeaf()));
case CONSTANT:
throw new UnsupportedOperationException("CONSTANT operator is not supported");
default:
throw new UnsupportedOperationException("Unknown operator: " + tree.getOperator());
}
}
/**
* Translate leaf nodes from Hive operator to Iceberg operator.
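   * <p>Illustrative example (column name is hypothetical): a BETWEEN leaf on
   * column "x" with literals 1 and 5 becomes
   * and(greaterThanOrEqual("x", 1), lessThanOrEqual("x", 5)).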
* @param leaf Leaf node
* @return Expression fully translated from Hive PredicateLeaf
*/
private static Expression translateLeaf(PredicateLeaf leaf) {
String column = leaf.getColumnName();
switch (leaf.getOperator()) {
case EQUALS:
return equal(column, leafToLiteral(leaf));
case LESS_THAN:
return lessThan(column, leafToLiteral(leaf));
case LESS_THAN_EQUALS:
return lessThanOrEqual(column, leafToLiteral(leaf));
case IN:
return in(column, leafToLiteralList(leaf));
case BETWEEN:
List<Object> icebergLiterals = leafToLiteralList(leaf);
return and(greaterThanOrEqual(column, icebergLiterals.get(0)),
lessThanOrEqual(column, icebergLiterals.get(1)));
case IS_NULL:
return isNull(column);
default:
throw new UnsupportedOperationException("Unknown operator: " + leaf.getOperator());
}
}
// PredicateLeafImpl has a work-around for Kryo serialization with java.util.Date objects where it converts values to
  // Timestamp using Date#getTime. This conversion discards microseconds, so this is necessary to avoid it.
private static final DynFields.UnboundField<?> LITERAL_FIELD = DynFields.builder()
.hiddenImpl(SearchArgumentImpl.PredicateLeafImpl.class, "literal")
.build();
private static Object leafToLiteral(PredicateLeaf leaf) {
switch (leaf.getType()) {
case LONG:
case BOOLEAN:
case STRING:
case FLOAT:
return leaf.getLiteral();
case DATE:
return daysFromTimestamp((Timestamp) leaf.getLiteral());
case TIMESTAMP:
return microsFromTimestamp((Timestamp) LITERAL_FIELD.get(leaf));
case DECIMAL:
return hiveDecimalToBigDecimal((HiveDecimalWritable) leaf.getLiteral());
default:
throw new UnsupportedOperationException("Unknown type: " + leaf.getType());
}
}
private static List<Object> leafToLiteralList(PredicateLeaf leaf) {
switch (leaf.getType()) {
case LONG:
case BOOLEAN:
case FLOAT:
case STRING:
return leaf.getLiteralList();
case DATE:
return leaf.getLiteralList().stream().map(value -> daysFromDate((Date) value))
.collect(Collectors.toList());
case DECIMAL:
return leaf.getLiteralList().stream()
.map(value -> hiveDecimalToBigDecimal((HiveDecimalWritable) value))
.collect(Collectors.toList());
case TIMESTAMP:
return leaf.getLiteralList().stream()
.map(value -> microsFromTimestamp((Timestamp) value))
.collect(Collectors.toList());
default:
throw new UnsupportedOperationException("Unknown type: " + leaf.getType());
}
}
private static BigDecimal hiveDecimalToBigDecimal(HiveDecimalWritable hiveDecimalWritable) {
return hiveDecimalWritable.getHiveDecimal().bigDecimalValue().setScale(hiveDecimalWritable.scale());
}
private static int daysFromDate(Date date) {
return DateTimeUtil.daysFromDate(date.toLocalDate());
}
private static int daysFromTimestamp(Timestamp timestamp) {
return DateTimeUtil.daysFromInstant(timestamp.toInstant());
}
private static long microsFromTimestamp(Timestamp timestamp) {
return DateTimeUtil.microsFromInstant(timestamp.toInstant());
}
}
| 1 | 24,495 | I think we should check the type returned by `getLiteral` and handle that here. Then we won't need separate code for different versions. | apache-iceberg | java |
@@ -131,13 +131,13 @@ func (s *HandlerTestSuite) TestFetchX509SVID() {
func (s *HandlerTestSuite) TestSendResponse() {
emptyUpdate := new(cache.WorkloadUpdate)
s.stream.EXPECT().Send(gomock.Any()).Times(0)
- err := s.h.sendResponse(emptyUpdate, s.stream)
+ err := s.h.sendX509SVIDResponse(emptyUpdate, s.stream)
s.Assert().Error(err)
- resp, err := s.h.composeResponse(s.workloadUpdate())
+ resp, err := s.h.composeX509SVIDResponse(s.workloadUpdate())
s.Require().NoError(err)
s.stream.EXPECT().Send(resp)
- err = s.h.sendResponse(s.workloadUpdate(), s.stream)
+ err = s.h.sendX509SVIDResponse(s.workloadUpdate(), s.stream)
s.Assert().NoError(err)
}
| 1 | package workload
import (
"context"
"crypto/x509"
"errors"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/suite"
"github.com/spiffe/spire/pkg/agent/auth"
"github.com/spiffe/spire/pkg/agent/manager/cache"
"github.com/spiffe/spire/pkg/common/bundleutil"
"github.com/spiffe/spire/pkg/common/telemetry"
"github.com/spiffe/spire/proto/agent/workloadattestor"
"github.com/spiffe/spire/proto/api/workload"
"github.com/spiffe/spire/proto/common"
"github.com/spiffe/spire/test/fakes/fakeagentcatalog"
"github.com/spiffe/spire/test/mock/agent/manager"
"github.com/spiffe/spire/test/mock/agent/manager/cache"
"github.com/spiffe/spire/test/mock/proto/agent/workloadattestor"
"github.com/spiffe/spire/test/mock/proto/api/workload"
"github.com/spiffe/spire/test/util"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
)
type HandlerTestSuite struct {
suite.Suite
h *Handler
ctrl *gomock.Controller
attestor *mock_workloadattestor.MockWorkloadAttestor
cache *mock_cache.MockCache
manager *mock_manager.MockManager
stream *mock_workload.MockSpiffeWorkloadAPI_FetchX509SVIDServer
}
func (s *HandlerTestSuite) SetupTest() {
mockCtrl := gomock.NewController(s.T())
log, _ := test.NewNullLogger()
s.attestor = mock_workloadattestor.NewMockWorkloadAttestor(mockCtrl)
s.cache = mock_cache.NewMockCache(mockCtrl)
s.manager = mock_manager.NewMockManager(mockCtrl)
s.stream = mock_workload.NewMockSpiffeWorkloadAPI_FetchX509SVIDServer(mockCtrl)
catalog := fakeagentcatalog.New()
catalog.SetWorkloadAttestors(s.attestor)
h := &Handler{
Manager: s.manager,
Catalog: catalog,
L: log,
T: telemetry.Blackhole{},
}
s.h = h
s.ctrl = mockCtrl
}
func TestWorkloadServer(t *testing.T) {
suite.Run(t, new(HandlerTestSuite))
}
func (s *HandlerTestSuite) TearDownTest() {
s.ctrl.Finish()
}
func (s *HandlerTestSuite) TestFetchX509SVID() {
// Without the security header
s.stream.EXPECT().Context().Return(context.Background())
err := s.h.FetchX509SVID(nil, s.stream)
s.Assert().Error(err)
// Without PID data
header := metadata.Pairs("workload.spiffe.io", "true")
ctx := context.Background()
ctx = metadata.NewIncomingContext(ctx, header)
s.stream.EXPECT().Context().Return(ctx)
err = s.h.FetchX509SVID(nil, s.stream)
s.Assert().Error(err)
p := &peer.Peer{
AuthInfo: auth.CallerInfo{
PID: 1,
},
}
ctx, cancel := context.WithCancel(context.Background())
ctx = peer.NewContext(ctx, p)
ctx = metadata.NewIncomingContext(ctx, header)
selectors := []*common.Selector{{Type: "foo", Value: "bar"}}
subscriber := mock_cache.NewMockSubscriber(s.ctrl)
subscription := make(chan *cache.WorkloadUpdate)
subscriber.EXPECT().Updates().Return(subscription).AnyTimes()
subscriber.EXPECT().Finish()
result := make(chan error)
s.stream.EXPECT().Context().Return(ctx).AnyTimes()
s.attestor.EXPECT().Attest(gomock.Any(), &workloadattestor.AttestRequest{Pid: int32(1)}).Return(&workloadattestor.AttestResponse{Selectors: selectors}, nil)
s.manager.EXPECT().SubscribeToCacheChanges(cache.Selectors{selectors[0]}).Return(subscriber)
s.stream.EXPECT().Send(gomock.Any())
go func() { result <- s.h.FetchX509SVID(nil, s.stream) }()
// Make sure it's still running...
select {
case err := <-result:
s.T().Errorf("hander exited immediately: %v", err)
case <-time.NewTimer(1 * time.Millisecond).C:
}
select {
case <-time.NewTimer(1 * time.Second).C:
s.T().Error("timeout sending update to workload handler")
case subscription <- s.workloadUpdate():
}
cancel()
select {
case err := <-result:
s.Assert().NoError(err)
case <-time.NewTimer(1 * time.Second).C:
s.T().Error("workload handler hung, shutdown timer exceeded")
}
}
func (s *HandlerTestSuite) TestSendResponse() {
emptyUpdate := new(cache.WorkloadUpdate)
s.stream.EXPECT().Send(gomock.Any()).Times(0)
err := s.h.sendResponse(emptyUpdate, s.stream)
s.Assert().Error(err)
resp, err := s.h.composeResponse(s.workloadUpdate())
s.Require().NoError(err)
s.stream.EXPECT().Send(resp)
err = s.h.sendResponse(s.workloadUpdate(), s.stream)
s.Assert().NoError(err)
}
func (s *HandlerTestSuite) TestComposeResponse() {
update := s.workloadUpdate()
keyData, err := x509.MarshalPKCS8PrivateKey(update.Entries[0].PrivateKey)
s.Require().NoError(err)
svidMsg := &workload.X509SVID{
SpiffeId: "spiffe://example.org/foo",
X509Svid: update.Entries[0].SVID[0].Raw,
X509SvidKey: keyData,
Bundle: update.Bundle.RootCAs()[0].Raw,
FederatesWith: []string{"spiffe://otherdomain.test"},
}
apiMsg := &workload.X509SVIDResponse{
Svids: []*workload.X509SVID{svidMsg},
FederatedBundles: map[string][]byte{
"spiffe://otherdomain.test": update.Bundle.RootCAs()[0].Raw,
},
}
resp, err := s.h.composeResponse(s.workloadUpdate())
s.Assert().NoError(err)
s.Assert().Equal(apiMsg, resp)
}
func (s *HandlerTestSuite) TestCallerPID() {
p := &peer.Peer{
AuthInfo: auth.CallerInfo{
PID: 1,
},
}
ctx := peer.NewContext(context.Background(), p)
pid, err := s.h.callerPID(ctx)
s.Assert().NoError(err)
s.Assert().Equal(int32(1), pid)
// Couldn't get PID via socket opt
p = &peer.Peer{
AuthInfo: auth.CallerInfo{
PID: 0,
Err: errors.New("i'm an error"),
},
}
ctx = peer.NewContext(context.Background(), p)
_, err = s.h.callerPID(ctx)
s.Assert().Error(err)
// Implementation error - custom auth creds not in use
p.AuthInfo = nil
ctx = peer.NewContext(context.Background(), p)
_, err = s.h.callerPID(ctx)
s.Assert().Error(err)
}
func (s *HandlerTestSuite) workloadUpdate() *cache.WorkloadUpdate {
svid, key, err := util.LoadSVIDFixture()
s.Require().NoError(err)
ca, _, err := util.LoadCAFixture()
s.Require().NoError(err)
entry := cache.Entry{
SVID: []*x509.Certificate{svid},
PrivateKey: key,
RegistrationEntry: &common.RegistrationEntry{
SpiffeId: "spiffe://example.org/foo",
FederatesWith: []string{"spiffe://otherdomain.test"},
},
}
update := &cache.WorkloadUpdate{
Entries: []*cache.Entry{&entry},
Bundle: bundleutil.BundleFromRootCA("spiffe://example.org", ca),
FederatedBundles: map[string]*bundleutil.Bundle{
"spiffe://otherdomain.test": bundleutil.BundleFromRootCA("spiffe://otherdomain.test", ca),
},
}
return update
}
| 1 | 10,029 | Tests for JWT handler functionality? | spiffe-spire | go |
@@ -548,9 +548,6 @@ namespace Microsoft.AspNetCore.Server.KestrelTests
"POST / HTTP/1.1",
"Content-Length: 3",
"",
- "101POST / HTTP/1.1",
- "Content-Length: 3",
- "",
"204POST / HTTP/1.1",
"Content-Length: 3",
"", | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net.Sockets;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel;
using Microsoft.AspNetCore.Server.Kestrel.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Internal.Http;
using Microsoft.AspNetCore.Testing;
using Microsoft.Extensions.Internal;
using Xunit;
namespace Microsoft.AspNetCore.Server.KestrelTests
{
/// <summary>
/// Summary description for EngineTests
/// </summary>
public class EngineTests
{
public static TheoryData<TestServiceContext> ConnectionFilterData
{
get
{
return new TheoryData<TestServiceContext>
{
{
new TestServiceContext()
},
{
new TestServiceContext(new PassThroughConnectionFilter())
}
};
}
}
private async Task App(HttpContext httpContext)
{
var request = httpContext.Request;
var response = httpContext.Response;
while (true)
{
var buffer = new byte[8192];
var count = await request.Body.ReadAsync(buffer, 0, buffer.Length);
if (count == 0)
{
break;
}
await response.Body.WriteAsync(buffer, 0, count);
}
}
private async Task AppChunked(HttpContext httpContext)
{
var request = httpContext.Request;
var response = httpContext.Response;
var data = new MemoryStream();
await request.Body.CopyToAsync(data);
var bytes = data.ToArray();
response.Headers["Content-Length"] = bytes.Length.ToString();
await response.Body.WriteAsync(bytes, 0, bytes.Length);
}
private Task EmptyApp(HttpContext httpContext)
{
return Task.FromResult<object>(null);
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public void EngineCanStartAndStop(TestServiceContext testContext)
{
var engine = new KestrelEngine(testContext);
engine.Start(1);
engine.Dispose();
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public void ListenerCanCreateAndDispose(TestServiceContext testContext)
{
testContext.App = App;
var engine = new KestrelEngine(testContext);
engine.Start(1);
var address = ServerAddress.FromUrl("http://127.0.0.1:0/");
var started = engine.CreateServer(address);
started.Dispose();
engine.Dispose();
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public void ConnectionCanReadAndWrite(TestServiceContext testContext)
{
testContext.App = App;
var engine = new KestrelEngine(testContext);
engine.Start(1);
var address = ServerAddress.FromUrl("http://127.0.0.1:0/");
var started = engine.CreateServer(address);
var socket = TestConnection.CreateConnectedLoopbackSocket(address.Port);
socket.Send(Encoding.ASCII.GetBytes("POST / HTTP/1.0\r\nContent-Length: 11\r\n\r\nHello World"));
socket.Shutdown(SocketShutdown.Send);
var buffer = new byte[8192];
while (true)
{
var length = socket.Receive(buffer);
if (length == 0) { break; }
var text = Encoding.ASCII.GetString(buffer, 0, length);
}
started.Dispose();
engine.Dispose();
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task Http10RequestReceivesHttp11Response(TestServiceContext testContext)
{
using (var server = new TestServer(App, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"POST / HTTP/1.0",
"Content-Length: 11",
"",
"Hello World");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"",
"Hello World");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task Http11(TestServiceContext testContext)
{
using (var server = new TestServer(AppChunked, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"GET / HTTP/1.1",
"Connection: close",
"Content-Length: 7",
"",
"Goodbye");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 7",
"",
"Goodbye");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task HeadersAndStreamsAreReused(TestServiceContext testContext)
{
var streamCount = 0;
var requestHeadersCount = 0;
var responseHeadersCount = 0;
var loopCount = 20;
Stream lastStream = null;
IHeaderDictionary lastRequestHeaders = null;
IHeaderDictionary lastResponseHeaders = null;
using (var server = new TestServer(
async context =>
{
if (context.Request.Body != lastStream)
{
lastStream = context.Request.Body;
streamCount++;
}
if (context.Request.Headers != lastRequestHeaders)
{
lastRequestHeaders = context.Request.Headers;
requestHeadersCount++;
}
if (context.Response.Headers != lastResponseHeaders)
{
lastResponseHeaders = context.Response.Headers;
responseHeadersCount++;
}
var ms = new MemoryStream();
await context.Request.Body.CopyToAsync(ms);
var request = ms.ToArray();
context.Response.ContentLength = request.Length;
await context.Response.Body.WriteAsync(request, 0, request.Length);
},
testContext))
{
using (var connection = server.CreateConnection())
{
var requestData =
Enumerable.Repeat("GET / HTTP/1.1\r\n", loopCount)
.Concat(new[] { "GET / HTTP/1.1\r\nContent-Length: 7\r\nConnection: close\r\n\r\nGoodbye" });
var response = string.Join("\r\n", new string[] {
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
""});
var lastResponse = string.Join("\r\n", new string[]
{
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 7",
"",
"Goodbye"
});
var responseData =
Enumerable.Repeat(response, loopCount)
.Concat(new[] { lastResponse });
await connection.SendEnd(requestData.ToArray());
await connection.ReceiveEnd(responseData.ToArray());
}
Assert.Equal(1, streamCount);
Assert.Equal(1, requestHeadersCount);
Assert.Equal(1, responseHeadersCount);
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task Http10ContentLength(TestServiceContext testContext)
{
using (var server = new TestServer(App, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"POST / HTTP/1.0",
"Content-Length: 11",
"",
"Hello World");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"",
"Hello World");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task Http10KeepAlive(TestServiceContext testContext)
{
using (var server = new TestServer(AppChunked, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.0",
"Connection: keep-alive",
"",
"POST / HTTP/1.0",
"Content-Length: 7",
"",
"Goodbye");
await connection.Receive(
"HTTP/1.1 200 OK",
"Connection: keep-alive",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"\r\n");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 7",
"",
"Goodbye");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task Http10KeepAliveNotUsedIfResponseContentLengthNotSet(TestServiceContext testContext)
{
using (var server = new TestServer(App, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.0",
"Connection: keep-alive",
"",
"POST / HTTP/1.0",
"Connection: keep-alive",
"Content-Length: 7",
"",
"Goodbye");
await connection.Receive(
"HTTP/1.1 200 OK",
"Connection: keep-alive",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"\r\n");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"",
"Goodbye");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task Http10KeepAliveContentLength(TestServiceContext testContext)
{
using (var server = new TestServer(AppChunked, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"POST / HTTP/1.0",
"Content-Length: 11",
"Connection: keep-alive",
"",
"Hello WorldPOST / HTTP/1.0",
"Content-Length: 7",
"",
"Goodbye");
await connection.Receive(
"HTTP/1.1 200 OK",
"Connection: keep-alive",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 11",
"",
"Hello World");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 7",
"",
"Goodbye");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task Expect100ContinueForBody(TestServiceContext testContext)
{
using (var server = new TestServer(AppChunked, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Expect: 100-continue",
"Connection: close",
"Content-Length: 11",
"\r\n");
await connection.Receive("HTTP/1.1 100 Continue", "\r\n");
await connection.SendEnd("Hello World");
await connection.Receive(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 11",
"",
"Hello World");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task DisconnectingClient(TestServiceContext testContext)
{
using (var server = new TestServer(App, testContext))
{
var socket = TestConnection.CreateConnectedLoopbackSocket(server.Port);
await Task.Delay(200);
socket.Dispose();
await Task.Delay(200);
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.0",
"\r\n");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"\r\n");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ZeroContentLengthSetAutomaticallyAfterNoWrites(TestServiceContext testContext)
{
using (var server = new TestServer(EmptyApp, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"GET / HTTP/1.0",
"Connection: keep-alive",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 200 OK",
"Connection: keep-alive",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ZeroContentLengthSetAutomaticallyForNonKeepAliveRequests(TestServiceContext testContext)
{
using (var server = new TestServer(async httpContext =>
{
Assert.Equal(0, await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1).TimeoutAfter(TimeSpan.FromSeconds(10)));
}, testContext))
{
using (var connection = server.CreateConnection())
{
// Use Send instead of SendEnd to ensure the connection will remain open while
// the app runs and reads 0 bytes from the body nonetheless. This checks that
// https://github.com/aspnet/KestrelHttpServer/issues/1104 is not regressing.
await connection.Send(
"GET / HTTP/1.1",
"Connection: close",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.0",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ZeroContentLengthNotSetAutomaticallyForHeadRequests(TestServiceContext testContext)
{
using (var server = new TestServer(EmptyApp, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"HEAD / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ZeroContentLengthNotSetAutomaticallyForCertainStatusCodes(TestServiceContext testContext)
{
using (var server = new TestServer(async httpContext =>
{
var request = httpContext.Request;
var response = httpContext.Response;
using (var reader = new StreamReader(request.Body, Encoding.ASCII))
{
var statusString = await reader.ReadLineAsync();
response.StatusCode = int.Parse(statusString);
}
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"POST / HTTP/1.1",
"Content-Length: 3",
"",
"101POST / HTTP/1.1",
"Content-Length: 3",
"",
"204POST / HTTP/1.1",
"Content-Length: 3",
"",
"205POST / HTTP/1.1",
"Content-Length: 3",
"",
"304POST / HTTP/1.1",
"Content-Length: 3",
"",
"200");
await connection.ReceiveEnd(
"HTTP/1.1 101 Switching Protocols",
$"Date: {testContext.DateHeaderValue}",
"",
"HTTP/1.1 204 No Content",
$"Date: {testContext.DateHeaderValue}",
"",
"HTTP/1.1 205 Reset Content",
$"Date: {testContext.DateHeaderValue}",
"",
"HTTP/1.1 304 Not Modified",
$"Date: {testContext.DateHeaderValue}",
"",
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ThrowingResultsIn500Response(TestServiceContext testContext)
{
bool onStartingCalled = false;
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(httpContext =>
{
var response = httpContext.Response;
response.OnStarting(_ =>
{
onStartingCalled = true;
return Task.FromResult<object>(null);
}, null);
// Anything added to the ResponseHeaders dictionary is ignored
response.Headers["Content-Length"] = "11";
throw new Exception();
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"GET / HTTP/1.1",
"Connection: close",
"",
"");
await connection.Receive(
"HTTP/1.1 500 Internal Server Error",
"");
await connection.Receive(
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 500 Internal Server Error",
"");
await connection.Receive("Connection: close",
"");
await connection.ReceiveEnd(
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
Assert.False(onStartingCalled);
Assert.Equal(2, testLogger.ApplicationErrorsLogged);
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ThrowingAfterWritingKillsConnection(TestServiceContext testContext)
{
bool onStartingCalled = false;
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(async httpContext =>
{
var response = httpContext.Response;
response.OnStarting(_ =>
{
onStartingCalled = true;
return Task.FromResult<object>(null);
}, null);
response.Headers["Content-Length"] = new[] { "11" };
await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello World"), 0, 11);
throw new Exception();
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 11",
"",
"Hello World");
Assert.True(onStartingCalled);
Assert.Equal(1, testLogger.ApplicationErrorsLogged);
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ThrowingAfterPartialWriteKillsConnection(TestServiceContext testContext)
{
bool onStartingCalled = false;
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(async httpContext =>
{
var response = httpContext.Response;
response.OnStarting(_ =>
{
onStartingCalled = true;
return Task.FromResult<object>(null);
}, null);
response.Headers["Content-Length"] = new[] { "11" };
await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello"), 0, 5);
throw new Exception();
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 11",
"",
"Hello");
Assert.True(onStartingCalled);
Assert.Equal(1, testLogger.ApplicationErrorsLogged);
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ConnectionClosesWhenFinReceived(TestServiceContext testContext)
{
using (var server = new TestServer(AppChunked, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"Post / HTTP/1.1",
"Content-Length: 7",
"",
"Goodbye");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 7",
"",
"Goodbye");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ConnectionClosesWhenFinReceivedBeforeRequestCompletes(TestServiceContext testContext)
{
using (var server = new TestServer(AppChunked, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"POST / HTTP/1.1");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 400 Bad Request",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"POST / HTTP/1.1",
"Content-Length: 7");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 400 Bad Request",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ThrowingInOnStartingResultsInFailedWritesAnd500Response(TestServiceContext testContext)
{
var onStartingCallCount1 = 0;
var onStartingCallCount2 = 0;
var failedWriteCount = 0;
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(async httpContext =>
{
var onStartingException = new Exception();
var response = httpContext.Response;
response.OnStarting(_ =>
{
onStartingCallCount1++;
throw onStartingException;
}, null);
response.OnStarting(_ =>
{
onStartingCallCount2++;
throw onStartingException;
}, null);
response.Headers["Content-Length"] = new[] { "11" };
var writeException = await Assert.ThrowsAsync<ObjectDisposedException>(async () =>
await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello World"), 0, 11));
Assert.Same(onStartingException, writeException.InnerException);
failedWriteCount++;
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"GET / HTTP/1.1",
"Connection: close",
"",
"");
await connection.Receive(
"HTTP/1.1 500 Internal Server Error",
"");
await connection.Receive(
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 500 Internal Server Error",
"Connection: close",
"");
await connection.ReceiveEnd(
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
Assert.Equal(2, onStartingCallCount2);
// The first registered OnStarting callback should not be called,
// since they are called LIFO and the other one failed.
Assert.Equal(0, onStartingCallCount1);
Assert.Equal(2, testLogger.ApplicationErrorsLogged);
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ThrowingInOnCompletedIsLoggedAndClosesConnection(TestServiceContext testContext)
{
var onCompletedCalled1 = false;
var onCompletedCalled2 = false;
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(async httpContext =>
{
var response = httpContext.Response;
response.OnCompleted(_ =>
{
onCompletedCalled1 = true;
throw new Exception();
}, null);
response.OnCompleted(_ =>
{
onCompletedCalled2 = true;
throw new Exception();
}, null);
response.Headers["Content-Length"] = new[] { "11" };
await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello World"), 0, 11);
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 11",
"",
"Hello World");
}
// All OnCompleted callbacks should be called even if they throw.
Assert.Equal(2, testLogger.ApplicationErrorsLogged);
Assert.True(onCompletedCalled1);
Assert.True(onCompletedCalled2);
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task RequestsCanBeAbortedMidRead(TestServiceContext testContext)
{
var readTcs = new TaskCompletionSource<object>();
var registrationTcs = new TaskCompletionSource<int>();
var requestId = 0;
using (var server = new TestServer(async httpContext =>
{
requestId++;
var response = httpContext.Response;
var request = httpContext.Request;
var lifetime = httpContext.Features.Get<IHttpRequestLifetimeFeature>();
lifetime.RequestAborted.Register(() => registrationTcs.TrySetResult(requestId));
if (requestId == 1)
{
response.Headers["Content-Length"] = new[] { "5" };
await response.WriteAsync("World");
}
else
{
var readTask = request.Body.CopyToAsync(Stream.Null);
lifetime.Abort();
try
{
await readTask;
}
catch (Exception ex)
{
readTcs.SetException(ex);
throw;
}
readTcs.SetException(new Exception("This shouldn't be reached."));
}
}, testContext))
{
using (var connection = server.CreateConnection())
{
// Never send the body so CopyToAsync always fails.
await connection.Send(
"POST / HTTP/1.1",
"Content-Length: 5",
"",
"HelloPOST / HTTP/1.1",
"Content-Length: 5",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 5",
"",
"World");
}
}
await Assert.ThrowsAsync<TaskCanceledException>(async () => await readTcs.Task);
// The cancellation token for only the last request should be triggered.
var abortedRequestId = await registrationTcs.Task;
Assert.Equal(2, abortedRequestId);
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task FailedWritesResultInAbortedRequest(TestServiceContext testContext)
{
// This should match _maxBytesPreCompleted in SocketOutput
var maxBytesPreCompleted = 65536;
// Ensure string is long enough to disable write-behind buffering
var largeString = new string('a', maxBytesPreCompleted + 1);
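            // (Illustrative arithmetic: 65536 + 1 = 65537 characters, one byte past
            // the pre-completed limit, so the write cannot complete via the
            // write-behind buffer and will observe the aborted connection.)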
var writeTcs = new TaskCompletionSource<object>();
var registrationWh = new ManualResetEventSlim();
var connectionCloseWh = new ManualResetEventSlim();
using (var server = new TestServer(async httpContext =>
{
var response = httpContext.Response;
var request = httpContext.Request;
var lifetime = httpContext.Features.Get<IHttpRequestLifetimeFeature>();
lifetime.RequestAborted.Register(() => registrationWh.Set());
await request.Body.CopyToAsync(Stream.Null);
connectionCloseWh.Wait();
try
{
// Ensure write is long enough to disable write-behind buffering
for (int i = 0; i < 100; i++)
{
await response.WriteAsync(largeString, lifetime.RequestAborted);
registrationWh.Wait(1000);
}
}
catch (Exception ex)
{
writeTcs.SetException(ex);
throw;
}
writeTcs.SetException(new Exception("This shouldn't be reached."));
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Content-Length: 5",
"",
"Hello");
// Don't wait to receive the response. Just close the socket.
}
connectionCloseWh.Set();
// Write failed
await Assert.ThrowsAsync<TaskCanceledException>(async () => await writeTcs.Task);
// RequestAborted tripped
Assert.True(registrationWh.Wait(1000));
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task NoErrorsLoggedWhenServerEndsConnectionBeforeClient(TestServiceContext testContext)
{
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(async httpContext =>
{
var response = httpContext.Response;
response.Headers["Content-Length"] = new[] { "11" };
await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello World"), 0, 11);
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.0",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 11",
"",
"Hello World");
}
}
Assert.Equal(0, testLogger.TotalErrorsLogged);
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task NoResponseSentWhenConnectionIsClosedByServerBeforeClientFinishesSendingRequest(TestServiceContext testContext)
{
using (var server = new TestServer(httpContext =>
{
httpContext.Abort();
return TaskCache.CompletedTask;
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.0",
"Content-Length: 1",
"",
"");
await connection.ReceiveForcedEnd();
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task RequestHeadersAreResetOnEachRequest(TestServiceContext testContext)
{
IHeaderDictionary originalRequestHeaders = null;
var firstRequest = true;
using (var server = new TestServer(httpContext =>
{
var requestFeature = httpContext.Features.Get<IHttpRequestFeature>();
if (firstRequest)
{
originalRequestHeaders = requestFeature.Headers;
requestFeature.Headers = new FrameRequestHeaders();
firstRequest = false;
}
else
{
Assert.Same(originalRequestHeaders, requestFeature.Headers);
}
return TaskCache.CompletedTask;
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task ResponseHeadersAreResetOnEachRequest(TestServiceContext testContext)
{
IHeaderDictionary originalResponseHeaders = null;
var firstRequest = true;
using (var server = new TestServer(httpContext =>
{
var responseFeature = httpContext.Features.Get<IHttpResponseFeature>();
if (firstRequest)
{
originalResponseHeaders = responseFeature.Headers;
responseFeature.Headers = new FrameResponseHeaders();
firstRequest = false;
}
else
{
Assert.Same(originalResponseHeaders, responseFeature.Headers);
}
return TaskCache.CompletedTask;
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Theory]
[InlineData("/%%2000", "/% 00")]
[InlineData("/%25%30%30", "/%00")]
public async Task PathEscapeTests(string inputPath, string expectedPath)
{
using (var server = new TestServer(async httpContext =>
{
var path = httpContext.Request.Path.Value;
httpContext.Response.Headers["Content-Length"] = new[] {path.Length.ToString() };
await httpContext.Response.WriteAsync(path);
}))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
$"GET {inputPath} HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Content-Length: {expectedPath.Length.ToString()}",
"",
$"{expectedPath}");
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task OnStartingCallbacksAreCalledInLastInFirstOutOrder(TestServiceContext testContext)
{
const string response = "hello, world";
var callOrder = new Stack<int>();
using (var server = new TestServer(async context =>
{
context.Response.OnStarting(_ =>
{
callOrder.Push(1);
return TaskCache.CompletedTask;
}, null);
context.Response.OnStarting(_ =>
{
callOrder.Push(2);
return TaskCache.CompletedTask;
}, null);
context.Response.ContentLength = response.Length;
await context.Response.WriteAsync(response);
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
$"Content-Length: {response.Length}",
"",
"hello, world");
Assert.Equal(1, callOrder.Pop());
Assert.Equal(2, callOrder.Pop());
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task OnCompletedCallbacksAreCalledInLastInFirstOutOrder(TestServiceContext testContext)
{
const string response = "hello, world";
var callOrder = new Stack<int>();
using (var server = new TestServer(async context =>
{
context.Response.OnCompleted(_ =>
{
callOrder.Push(1);
return TaskCache.CompletedTask;
}, null);
context.Response.OnCompleted(_ =>
{
callOrder.Push(2);
return TaskCache.CompletedTask;
}, null);
context.Response.ContentLength = response.Length;
await context.Response.WriteAsync(response);
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
$"Content-Length: {response.Length}",
"",
"hello, world");
Assert.Equal(1, callOrder.Pop());
Assert.Equal(2, callOrder.Pop());
}
}
}
[Theory]
[MemberData(nameof(ConnectionFilterData))]
public async Task UpgradeRequestIsNotKeptAliveOrChunked(TestServiceContext testContext)
{
using (var server = new TestServer(async context =>
{
var upgradeFeature = context.Features.Get<IHttpUpgradeFeature>();
var duplexStream = await upgradeFeature.UpgradeAsync();
while (true)
{
var buffer = new byte[8192];
var count = await duplexStream.ReadAsync(buffer, 0, buffer.Length);
if (count == 0)
{
break;
}
await duplexStream.WriteAsync(buffer, 0, count);
}
}, testContext))
{
using (var connection = server.CreateConnection())
{
await connection.SendEnd(
"GET / HTTP/1.1",
"Connection: Upgrade",
"",
"Hello World");
await connection.ReceiveEnd(
"HTTP/1.1 101 Switching Protocols",
"Connection: Upgrade",
$"Date: {testContext.DateHeaderValue}",
"",
"Hello World");
}
}
}
}
}
| 1 | 10,251 | It's weird to set a 101 response and complete without writing anything, but this is still a valid test case right? | aspnet-KestrelHttpServer | .cs |
@@ -207,6 +207,19 @@ class DefaultBucketViewTest(FormattedErrorMixin, BaseWebTest,
self.assertEqual(resp.body, response.body)
+class HelloViewTest(BaseWebTest, unittest.TestCase):
+
+ def test_returns_bucket_id_and_url_if_authenticated(self):
+ response = self.app.get('/', headers=self.headers)
+ self.assertEqual(response.json['user']['bucket'],
+ '23bb0efc-e80d-829e-6757-79d41e16640f')
+
+ def test_flush_capability_if_enabled(self):
+ resp = self.app.get('/')
+ capabilities = resp.json['capabilities']
+ self.assertIn('default_bucket', capabilities)
+
+
class ReadonlyDefaultBucket(BaseWebTest, unittest.TestCase):
def get_app_settings(self, extras=None): | 1 | import mock
from six import text_type
from uuid import UUID
from pyramid.httpexceptions import HTTPBadRequest
from cliquet.errors import ERRORS, http_error
from cliquet.storage import exceptions as storage_exceptions
from cliquet.tests.support import FormattedErrorMixin
from cliquet.utils import hmac_digest
from kinto.tests.support import (BaseWebTest, unittest, get_user_headers,
MINIMALIST_RECORD)
class DefaultBucketViewTest(FormattedErrorMixin, BaseWebTest,
unittest.TestCase):
bucket_url = '/buckets/default'
collection_url = '/buckets/default/collections/tasks'
def test_default_bucket_exists_and_has_user_id(self):
bucket = self.app.get(self.bucket_url, headers=self.headers)
result = bucket.json
settings = self.app.app.registry.settings
hmac_secret = settings['userid_hmac_secret']
bucket_id = hmac_digest(hmac_secret, self.principal)[:32]
self.assertEqual(result['data']['id'], text_type(UUID(bucket_id)))
self.assertEqual(result['permissions']['write'], [self.principal])
def test_default_bucket_can_still_be_explicitly_created(self):
bucket = {'permissions': {'read': ['system.Everyone']}}
resp = self.app.put_json(self.bucket_url, bucket, headers=self.headers)
result = resp.json
self.assertIn('system.Everyone', result['permissions']['read'])
def test_default_bucket_collections_are_automatically_created(self):
self.app.get(self.collection_url, headers=self.headers, status=200)
def test_adding_a_task_for_bob_doesnt_add_it_for_alice(self):
record = MINIMALIST_RECORD.copy()
resp = self.app.post_json(self.collection_url + '/records',
record, headers=get_user_headers('bob'))
record_id = self.collection_url + '/records/' + resp.json['data']['id']
resp = self.app.get(record_id, headers=get_user_headers('alice'),
status=404)
def test_unauthenticated_bucket_access_raises_json_401(self):
resp = self.app.get(self.bucket_url, status=401)
self.assertEquals(resp.json['message'],
'Please authenticate yourself to use this endpoint.')
def test_bucket_id_is_an_uuid_with_dashes(self):
bucket = self.app.get(self.bucket_url, headers=self.headers)
bucket_id = bucket.json['data']['id']
self.assertIn('-', bucket_id)
try:
UUID(bucket_id)
except ValueError:
self.fail('bucket_id: %s is not a valid UUID.' % bucket_id)
def test_second_call_on_default_bucket_doesnt_raise_a_412(self):
self.app.get(self.bucket_url, headers=self.headers)
self.app.get(self.bucket_url, headers=self.headers)
def test_second_call_on_default_bucket_collection_doesnt_raise_a_412(self):
self.app.get(self.collection_url, headers=self.headers)
self.app.get(self.collection_url, headers=self.headers)
def test_querystring_parameters_are_taken_into_account(self):
self.app.get(self.collection_url + '/records?_since=invalid',
headers=self.headers,
status=400)
def test_option_is_possible_without_authentication_for_default(self):
headers = 'authorization,content-type'
self.app.options(self.collection_url + '/records',
headers={
'Origin': 'http://localhost:8000',
'Access-Control-Request-Method': 'GET',
'Access-Control-Request-Headers': headers})
def test_cors_headers_are_provided_on_errors(self):
resp = self.app.post_json(self.collection_url + '/records',
MINIMALIST_RECORD,
headers=self.headers)
current = resp.json['data']['last_modified']
headers = self.headers.copy()
headers.update({
'Origin': 'http://localhost:8000',
'If-None-Match': ('"%s"' % current).encode('utf-8')
})
resp = self.app.get(self.collection_url + '/records',
headers=headers, status=304)
self.assertIn('Access-Control-Allow-Origin', resp.headers)
self.assertIn('ETag', resp.headers['Access-Control-Expose-Headers'])
def test_etag_is_present_and_exposed_in_304_error(self):
resp = self.app.post_json(self.collection_url + '/records',
MINIMALIST_RECORD,
headers=self.headers)
current = resp.json['data']['last_modified']
headers = self.headers.copy()
headers.update({
'Origin': 'http://localhost:8000',
'If-None-Match': ('"%s"' % current).encode('utf-8')
})
resp = self.app.get(self.collection_url + '/records',
headers=headers, status=304)
self.assertIn('Access-Control-Expose-Headers', resp.headers)
self.assertIn('ETag', resp.headers)
self.assertIn('ETag', resp.headers['Access-Control-Expose-Headers'])
def test_bucket_id_starting_with_default_can_still_be_created(self):
# We need to create the bucket first since it is not the default bucket
resp = self.app.put(
self.bucket_url.replace('default', 'default-1234'),
headers=self.headers, status=201)
bucket_id = resp.json['data']['id']
self.assertEquals(bucket_id, 'default-1234')
# We can then create the collection
collection_url = '/buckets/default-1234/collections/default'
self.app.put(
collection_url,
headers=self.headers,
status=201)
resp = self.app.get('/buckets/default-1234/collections',
headers=self.headers)
self.assertEquals(resp.json['data'][0]['id'], 'default')
def test_default_bucket_objects_are_checked_only_once_in_batch(self):
batch = {'requests': []}
nb_create = 25
for i in range(nb_create):
request = {'method': 'POST',
'path': self.collection_url + '/records',
'body': MINIMALIST_RECORD}
batch['requests'].append(request)
with mock.patch.object(self.storage, 'create',
wraps=self.storage.create) as patched:
self.app.post_json('/batch', batch, headers=self.headers)
self.assertEqual(patched.call_count, nb_create + 2)
def test_parent_collection_is_taken_from_the_one_created_in_batch(self):
batch = {'requests': []}
nb_create = 25
for i in range(nb_create):
request = {'method': 'POST',
'path': self.collection_url + '/records',
'body': MINIMALIST_RECORD}
batch['requests'].append(request)
with mock.patch.object(self.storage, 'get',
wraps=self.storage.get) as patched:
self.app.post_json('/batch', batch, headers=self.headers)
self.assertEqual(patched.call_count, 0)
def test_parent_collection_is_taken_from_the_one_checked_in_batch(self):
# Create it first.
self.app.put(self.collection_url, headers=self.headers, status=201)
batch = {'requests': []}
nb_create = 25
for i in range(nb_create):
request = {'method': 'POST',
'path': self.collection_url + '/records',
'body': MINIMALIST_RECORD}
batch['requests'].append(request)
with mock.patch.object(self.storage, 'get',
wraps=self.storage.get) as patched:
self.app.post_json('/batch', batch, headers=self.headers)
self.assertEqual(patched.call_count, 0)
def test_collection_id_is_validated(self):
collection_url = '/buckets/default/collections/__files__/records'
self.app.get(collection_url, headers=self.headers, status=400)
def test_collection_id_does_not_support_unicode(self):
collection_url = '/buckets/default/collections/%E8%A6%8B/records'
self.app.get(collection_url, headers=self.headers, status=400)
def test_405_is_a_valid_formatted_error(self):
response = self.app.post(self.collection_url,
headers=self.headers, status=405)
self.assertFormattedError(
response, 405, ERRORS.METHOD_NOT_ALLOWED, "Method Not Allowed",
"Method not allowed on this endpoint.")
def test_formatted_error_are_passed_through(self):
response = http_error(HTTPBadRequest(),
errno=ERRORS.INVALID_PARAMETERS,
message='Yop')
with mock.patch.object(self.storage, 'create') as mocked:
mocked.side_effect = [
{"id": "abc", "last_modified": 43},
{"id": "abc", "last_modified": 44},
response
]
resp = self.app.post(self.collection_url + '/records',
headers=self.headers,
status=400)
self.assertEqual(resp.body, response.body)
class ReadonlyDefaultBucket(BaseWebTest, unittest.TestCase):
def get_app_settings(self, extras=None):
settings = super(ReadonlyDefaultBucket, self).get_app_settings(extras)
settings['readonly'] = True
return settings
def test_implicit_creation_is_rejected(self):
self.app.get('/buckets/default', headers=self.headers, status=405)
class BackendErrorTest(BaseWebTest, unittest.TestCase):
def setUp(self):
super(BackendErrorTest, self).setUp()
self.patcher = mock.patch.object(
self.storage, 'create',
side_effect=storage_exceptions.BackendError())
self.addCleanup(self.patcher.stop)
def test_implicit_bucket_creation_raises_503_if_backend_fails(self):
self.patcher.start()
self.app.get('/buckets/default', headers=self.headers, status=503)
def test_implicit_collection_creation_raises_503_if_backend_fails(self):
self.app.get('/buckets/default', headers=self.headers)
self.patcher.start()
self.app.get('/buckets/default/collections/articles',
headers=self.headers, status=503)
| 1 | 8,942 | Should we move that information in the capability itself? | Kinto-kinto | py |
@@ -18,6 +18,9 @@ class StatisticsElement(Chart):
__abstract = True
+ # Ensure Interface does not add an index
+ _auto_indexable_1d = False
+
def __init__(self, data, kdims=None, vdims=None, **params):
if isinstance(data, Element):
params.update(get_param_values(data)) | 1 | import param
import numpy as np
from ..core.dimension import Dimension, process_dimensions
from ..core.data import Dataset
from ..core.element import Element, Element2D
from ..core.util import get_param_values, OrderedDict
from .chart import Chart, BoxWhisker
class StatisticsElement(Chart):
"""
StatisticsElement provides a baseclass for Element types that
compute statistics based on the input data. The baseclass
overrides standard Dataset methods emulating the existence
of the value dimensions.
"""
__abstract = True
def __init__(self, data, kdims=None, vdims=None, **params):
if isinstance(data, Element):
params.update(get_param_values(data))
kdims = kdims or data.dimensions()[:len(self.kdims)]
data = tuple(data.dimension_values(d) for d in kdims)
params.update(dict(kdims=kdims, vdims=[], _validate_vdims=False))
super(StatisticsElement, self).__init__(data, **params)
if not vdims:
self.vdims = [Dimension('Density')]
elif len(vdims) > 1:
raise ValueError("%s expects at most one vdim." %
type(self).__name__)
else:
self.vdims = process_dimensions(None, vdims)['vdims']
def range(self, dim, data_range=True):
iskdim = self.get_dimension(dim) not in self.vdims
return super(StatisticsElement, self).range(dim, data_range=iskdim)
def dimension_values(self, dim, expanded=True, flat=True):
"""
Returns the values along a particular dimension. If unique
values are requested will return only unique values.
"""
dim = self.get_dimension(dim, strict=True)
if dim in self.vdims:
return np.full(len(self), np.NaN)
return self.interface.values(self, dim, expanded, flat)
def get_dimension_type(self, dim):
"""
Returns the specified Dimension type if specified or
if the dimension_values types are consistent otherwise
None is returned.
"""
dim = self.get_dimension(dim)
if dim is None:
return None
elif dim.type is not None:
return dim.type
elif dim in self.vdims:
return np.float64
return self.interface.dimension_type(self, dim)
def dframe(self, dimensions=None):
"""
Returns the data in the form of a DataFrame. Supplying a list
of dimensions filters the dataframe. If the data is already
a DataFrame a copy is returned.
"""
if dimensions:
dimensions = [self.get_dimension(d, strict=True) for d in dimensions
if d in dimensions.kdims]
else:
dimensions = self.kdims
return self.interface.dframe(self, dimensions)
def columns(self, dimensions=None):
if dimensions is None:
dimensions = self.kdims
else:
dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
return OrderedDict([(d.name, self.dimension_values(d))
for d in dimensions if d in self.kdims])
class Bivariate(StatisticsElement):
"""
Bivariate elements are containers for two dimensional data,
which is to be visualized as a kernel density estimate. The
data should be supplied in a tabular format of x- and y-columns.
"""
kdims = param.List(default=[Dimension('x'), Dimension('y')],
bounds=(2, 2))
vdims = param.List(default=[Dimension('Density')], bounds=(0,1))
group = param.String(default="Bivariate", constant=True)
class Distribution(StatisticsElement):
"""
Distribution elements provides a representation for a
one-dimensional distribution which can be visualized as a kernel
density estimate. The data should be supplied in a tabular format
and will use the first column.
"""
kdims = param.List(default=[Dimension('Value')], bounds=(1, 1))
group = param.String(default='Distribution', constant=True)
vdims = param.List(default=[Dimension('Density')], bounds=(0, 1))
# Ensure Interface does not add an index
_auto_indexable_1d = False
class Violin(BoxWhisker):
"""
Violin elements represent data as 1D distributions visualized
as a kernel-density estimate. It may have a single value dimension
and any number of key dimensions declaring the grouping of each
violin.
"""
group = param.String(default='Violin', constant=True)
class HexTiles(Dataset, Element2D):
"""
HexTiles is a statistical element with a visual representation
that renders a density map of the data values as a hexagonal grid.
Before display the data is aggregated either by counting the values
in each hexagonal bin or by computing aggregates.
"""
group = param.String(default='HexTiles', constant=True)
kdims = param.List(default=[Dimension('x'), Dimension('y')],
bounds=(2, 2))
| 1 | 20,083 | Is this needed? | holoviz-holoviews | py |
@@ -163,7 +163,7 @@ func (r *ReconcileRemoteClusterIngress) Reconcile(request reconcile.Request) (re
// can't proceed if the secret(s) referred to doesn't exist
certBundleSecrets, err := r.getIngressSecrets(rContext)
if err != nil {
- rContext.logger.WithError(err).Error("will need to retry until able to find all certBundle secrets")
+ rContext.logger.Warningf("will need to retry until able to find all certBundle secrets : %v", err)
conditionErr := r.setIngressCertificateNotFoundCondition(rContext, true, err.Error())
if conditionErr != nil {
rContext.logger.WithError(conditionErr).Error("unable to set IngressCertNotFound condition") | 1 | package remoteingress
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"reflect"
"sort"
"time"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
ingresscontroller "github.com/openshift/api/operator/v1"
apihelpers "github.com/openshift/hive/pkg/apis/helpers"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1alpha1"
hivemetrics "github.com/openshift/hive/pkg/controller/metrics"
"github.com/openshift/hive/pkg/controller/utils"
"github.com/openshift/hive/pkg/resource"
)
const (
controllerName = "remoteingress"
// namespace where the ingressController objects must be created
remoteIngressControllerNamespace = "openshift-ingress-operator"
// while the IngressController objects live in openshift-ingress-operator
// the secrets that the ingressControllers refer to must live in openshift-ingress
remoteIngressConrollerSecretsNamespace = "openshift-ingress"
ingressCertificateNotFoundReason = "IngressCertificateNotFound"
ingressCertificateFoundReason = "IngressCertificateFound"
ingressSecretTolerationKey = "hive.openshift.io/ingress"
// requeueAfter2 is just a static 2 minute delay for when to requeue
// for the case when a necessary secret is missing
requeueAfter2 = time.Minute * 2
)
// kubeCLIApplier knows how to ApplyRuntimeObject.
type kubeCLIApplier interface {
ApplyRuntimeObject(obj runtime.Object, scheme *runtime.Scheme) (resource.ApplyResult, error)
}
// Add creates a new RemoteClusterIngress Controller and adds it to the Manager with default RBAC. The Manager will set fields on the
// Controller and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
return AddToManager(mgr, NewReconciler(mgr))
}
// NewReconciler returns a new reconcile.Reconciler
func NewReconciler(mgr manager.Manager) reconcile.Reconciler {
logger := log.WithField("controller", controllerName)
helper := resource.NewHelperWithMetricsFromRESTConfig(mgr.GetConfig(), controllerName, logger)
return &ReconcileRemoteClusterIngress{
Client: utils.NewClientWithMetricsOrDie(mgr, controllerName),
scheme: mgr.GetScheme(),
logger: log.WithField("controller", controllerName),
kubeCLI: helper,
}
}
// AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
func AddToManager(mgr manager.Manager, r reconcile.Reconciler) error {
// Create a new controller
c, err := controller.New("remoteingress-controller", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: utils.GetConcurrentReconciles()})
if err != nil {
return err
}
// Watch for changes to ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
return nil
}
type reconcileContext struct {
clusterDeployment *hivev1.ClusterDeployment
certBundleSecrets []*corev1.Secret
logger log.FieldLogger
}
var _ reconcile.Reconciler = &ReconcileRemoteClusterIngress{}
// ReconcileRemoteClusterIngress reconciles the ingress objects defined in a ClusterDeployment object
type ReconcileRemoteClusterIngress struct {
client.Client
scheme *runtime.Scheme
kubeCLI kubeCLIApplier
logger log.FieldLogger
}
// Reconcile reads that state of the cluster for a ClusterDeployment object and sets up
// any needed ClusterIngress objects up for syncing to the remote cluster.
//
// +kubebuilder:rbac:groups=hive.openshift.io,resources=clusterdeployments,verbs=get;watch;update
// +kubebuilder:rbac:groups=hive.openshift.io,resources=syncsets,verbs=get;create;update;delete;patch;list;watch
func (r *ReconcileRemoteClusterIngress) Reconcile(request reconcile.Request) (reconcile.Result, error) {
start := time.Now()
cdLog := r.logger.WithFields(log.Fields{
"clusterDeployment": request.Name,
"namespace": request.Namespace,
})
cdLog.Info("reconciling cluster deployment")
defer func() {
dur := time.Since(start)
hivemetrics.MetricControllerReconcileTime.WithLabelValues(controllerName).Observe(dur.Seconds())
cdLog.WithField("elapsed", dur).Info("reconcile complete")
}()
rContext := &reconcileContext{}
// Fetch the ClusterDeployment instance
cd := &hivev1.ClusterDeployment{}
err := r.Get(context.TODO(), request.NamespacedName, cd)
if err != nil {
if errors.IsNotFound(err) {
// Object not found (must have been deleted), return
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request
log.WithError(err).Error("error looking up cluster deployment")
return reconcile.Result{}, err
}
rContext.clusterDeployment = cd
// If the clusterdeployment is deleted, do not reconcile.
if cd.DeletionTimestamp != nil {
return reconcile.Result{}, nil
}
rContext.logger = cdLog
if cd.Spec.Ingress == nil {
		// the admission controller will ensure that we get valid-looking
// Spec.Ingress (ie no missing 'default', no going from a defined
// ingress list to an empty list, etc)
rContext.logger.Debug("no ingress objects defined. using default intaller behavior.")
return reconcile.Result{}, nil
}
// can't proceed if the secret(s) referred to doesn't exist
certBundleSecrets, err := r.getIngressSecrets(rContext)
if err != nil {
rContext.logger.WithError(err).Error("will need to retry until able to find all certBundle secrets")
conditionErr := r.setIngressCertificateNotFoundCondition(rContext, true, err.Error())
if conditionErr != nil {
rContext.logger.WithError(conditionErr).Error("unable to set IngressCertNotFound condition")
return reconcile.Result{}, conditionErr
}
// no error return b/c we just need to wait for the certificate/secret to appear
// which is out of our control.
return reconcile.Result{
Requeue: true,
RequeueAfter: requeueAfter2,
}, nil
}
if err := r.setIngressCertificateNotFoundCondition(rContext, false, ""); err != nil {
rContext.logger.WithError(err).Error("error setting clusterDeployment condition")
return reconcile.Result{}, err
}
rContext.certBundleSecrets = certBundleSecrets
if err := r.syncClusterIngress(rContext); err != nil {
cdLog.Errorf("error syncing clusterIngress syncset: %v", err)
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// syncClusterIngress will create the syncSet with all the needed secrets and
// ingressController objects to sync to the remote cluster
func (r *ReconcileRemoteClusterIngress) syncClusterIngress(rContext *reconcileContext) error {
rContext.logger.Info("reconciling ClusterIngress for cluster deployment")
rawList := rawExtensionsFromClusterDeployment(rContext)
return r.syncSyncSet(rContext, rawList)
}
// rawExtensionsFromClusterDeployment will return the slice of runtime.RawExtension objects
// (really the syncSet.Spec.Resources) to satisfy the ingress config for the clusterDeployment
func rawExtensionsFromClusterDeployment(rContext *reconcileContext) []runtime.RawExtension {
rawList := []runtime.RawExtension{}
// first the certBundle secrets
for _, cbSecret := range rContext.certBundleSecrets {
secret := createSecret(rContext, cbSecret)
raw := runtime.RawExtension{Object: secret}
rawList = append(rawList, raw)
}
// then the ingressControllers
for _, ingress := range rContext.clusterDeployment.Spec.Ingress {
ingressObj := createIngressController(rContext.clusterDeployment, ingress, rContext.certBundleSecrets)
raw := runtime.RawExtension{Object: ingressObj}
rawList = append(rawList, raw)
}
return rawList
}
func newSyncSetSpec(cd *hivev1.ClusterDeployment, rawExtensions []runtime.RawExtension) *hivev1.SyncSetSpec {
ssSpec := &hivev1.SyncSetSpec{
SyncSetCommonSpec: hivev1.SyncSetCommonSpec{
Resources: rawExtensions,
ResourceApplyMode: "sync",
},
ClusterDeploymentRefs: []corev1.LocalObjectReference{
{
Name: cd.Name,
},
},
}
return ssSpec
}
// syncSyncSet builds up a syncSet object with the passed-in rawExtensions as the spec.Resources
func (r *ReconcileRemoteClusterIngress) syncSyncSet(rContext *reconcileContext, rawExtensions []runtime.RawExtension) error {
ssName := apihelpers.GetResourceName(rContext.clusterDeployment.Name, "clusteringress")
newSyncSetSpec := newSyncSetSpec(rContext.clusterDeployment, rawExtensions)
syncSet := &hivev1.SyncSet{
ObjectMeta: metav1.ObjectMeta{
Name: ssName,
Namespace: rContext.clusterDeployment.Namespace,
},
TypeMeta: metav1.TypeMeta{
Kind: "SyncSet",
APIVersion: hivev1.SchemeGroupVersion.String(),
},
Spec: *newSyncSetSpec,
}
// ensure the syncset gets cleaned up when the clusterdeployment is deleted
if err := controllerutil.SetControllerReference(rContext.clusterDeployment, syncSet, r.scheme); err != nil {
r.logger.WithError(err).Error("error setting owner reference")
return err
}
if _, err := r.kubeCLI.ApplyRuntimeObject(syncSet, r.scheme); err != nil {
rContext.logger.WithError(err).Error("failed to apply syncset")
return err
}
return nil
}
// createSecret returns the secret that needs to be synced to the remote cluster
// to satisfy any ingressController object that depends on the secret.
func createSecret(rContext *reconcileContext, cbSecret *corev1.Secret) *corev1.Secret {
newSecret := cbSecret.DeepCopy()
// don't want all the local object meta (eg creation/uuid/etc), so replace it
// with a clean one with just the data we want.
newSecret.ObjectMeta = metav1.ObjectMeta{
Name: remoteSecretNameForCertificateBundleSecret(cbSecret.Name, rContext.clusterDeployment),
Namespace: remoteIngressConrollerSecretsNamespace,
Labels: cbSecret.Labels,
Annotations: cbSecret.Annotations,
}
return newSecret
}
// createIngressController will return an ingressController based on a clusterDeployment's
// spec.Ingress object
func createIngressController(cd *hivev1.ClusterDeployment, ingress hivev1.ClusterIngress, secrets []*corev1.Secret) *ingresscontroller.IngressController {
newIngress := ingresscontroller.IngressController{
TypeMeta: metav1.TypeMeta{
Kind: "IngressController",
APIVersion: ingresscontroller.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: ingress.Name,
Namespace: remoteIngressControllerNamespace,
},
Spec: ingresscontroller.IngressControllerSpec{
Domain: ingress.Domain,
RouteSelector: ingress.RouteSelector,
NamespaceSelector: ingress.NamespaceSelector,
},
}
// if the ingress entry references a certBundle, make sure to put the appropriate looking
// entry in the ingressController object
if ingress.ServingCertificate != "" {
var secretName string
for _, cb := range cd.Spec.CertificateBundles {
// assume we're going to find the certBundle as we would've errored earlier
if cb.Name == ingress.ServingCertificate {
secretName = cb.SecretRef.Name
newIngress.Spec.DefaultCertificate = &corev1.LocalObjectReference{
Name: remoteSecretNameForCertificateBundleSecret(cb.SecretRef.Name, cd),
}
break
}
}
// NOTE: This toleration is added to cause a reload of the
// IngressController when the certificate secrets are updated.
// In the future, this should not be necessary.
if len(secretName) != 0 {
newIngress.Spec.NodePlacement = &ingresscontroller.NodePlacement{
Tolerations: []corev1.Toleration{
{
Key: ingressSecretTolerationKey,
Operator: corev1.TolerationOpEqual,
Value: secretHash(findSecret(secretName, secrets)),
},
},
}
}
}
return &newIngress
}
func (r *ReconcileRemoteClusterIngress) getIngressSecrets(rContext *reconcileContext) ([]*corev1.Secret, error) {
certSet := sets.NewString()
for _, ingress := range rContext.clusterDeployment.Spec.Ingress {
if ingress.ServingCertificate != "" {
certSet.Insert(ingress.ServingCertificate)
}
}
cbSecrets := []*corev1.Secret{}
for _, cert := range certSet.List() {
foundCertBundle := false
for _, cb := range rContext.clusterDeployment.Spec.CertificateBundles {
if cb.Name == cert {
foundCertBundle = true
cbSecret := &corev1.Secret{}
searchKey := types.NamespacedName{
Name: cb.SecretRef.Name,
Namespace: rContext.clusterDeployment.Namespace,
}
if err := r.Get(context.TODO(), searchKey, cbSecret); err != nil {
if errors.IsNotFound(err) {
msg := fmt.Sprintf("secret %v for certbundle %v was not found", cb.SecretRef.Name, cb.Name)
rContext.logger.Error(msg)
return cbSecrets, fmt.Errorf(msg)
}
rContext.logger.WithError(err).Error("error while gathering certBundle secret")
return cbSecrets, err
}
cbSecrets = append(cbSecrets, cbSecret)
}
}
if !foundCertBundle {
return cbSecrets, fmt.Errorf("didn't find expected certbundle %v", cert)
}
}
return cbSecrets, nil
}
// setIngressCertificateNotFoundCondition will set/unset the condition indicating whether all certificates required
// by the clusterDeployment ingress objects were found. Returns any error encountered while setting the condition.
func (r *ReconcileRemoteClusterIngress) setIngressCertificateNotFoundCondition(rContext *reconcileContext, notFound bool, missingSecretMessage string) error {
var (
msg, reason string
status corev1.ConditionStatus
updateCheck utils.UpdateConditionCheck
)
origCD := rContext.clusterDeployment.DeepCopy()
if notFound {
msg = missingSecretMessage
status = corev1.ConditionTrue
reason = ingressCertificateNotFoundReason
updateCheck = utils.UpdateConditionIfReasonOrMessageChange
} else {
msg = fmt.Sprintf("all secrets for ingress found")
status = corev1.ConditionFalse
reason = ingressCertificateFoundReason
updateCheck = utils.UpdateConditionNever
}
rContext.clusterDeployment.Status.Conditions = utils.SetClusterDeploymentCondition(rContext.clusterDeployment.Status.Conditions,
hivev1.IngressCertificateNotFoundCondition, status, reason, msg, updateCheck)
if !reflect.DeepEqual(rContext.clusterDeployment.Status.Conditions, origCD.Status.Conditions) {
if err := r.Status().Update(context.TODO(), rContext.clusterDeployment); err != nil {
rContext.logger.WithError(err).Error("error updating clusterDeployment condition")
return err
}
}
return nil
}
// remoteSecretNameForCertificateBundleSecret just stitches together a secret name consisting of
// the original certificateBundle's secret name pre-pended with the clusterDeployment.Name
func remoteSecretNameForCertificateBundleSecret(secretName string, cd *hivev1.ClusterDeployment) string {
return apihelpers.GetResourceName(cd.Name, secretName)
}
func findSecret(secretName string, secrets []*corev1.Secret) *corev1.Secret {
for i, s := range secrets {
if s.Name == secretName {
return secrets[i]
}
}
return nil
}
func secretHash(secret *corev1.Secret) string {
if secret == nil {
return ""
}
b := &bytes.Buffer{}
// Write out map in sorted key order so we
// can get repeatable hashes
keys := []string{}
for k := range secret.Data {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
b.Write([]byte(k + ":"))
b.Write(secret.Data[k])
b.Write([]byte("\n"))
}
return fmt.Sprintf("%x", md5.Sum(b.Bytes()))
}
| 1 | 7,344 | This log should not be an error as it communicates that we need to retry till we get the cert information for remote ingress controller. | openshift-hive | go |
@@ -130,6 +130,8 @@ namespace Nethermind.Blockchain
BlockHeader[] FindHeaders(Keccak hash, int numberOfBlocks, int skip, bool reverse);
void DeleteInvalidBlock(Block invalidBlock);
+
+ void Flush();
event EventHandler<BlockEventArgs> NewBestSuggestedBlock;
event EventHandler<BlockEventArgs> BlockAddedToMain; | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Nethermind.Blockchain.Find;
using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Dirichlet.Numerics;
namespace Nethermind.Blockchain
{
public interface IBlockTree : IBlockFinder
{
/// <summary>
/// Chain ID that identifies the chain among the public and private chains (different IDs for mainnet, ETH classic, etc.)
/// </summary>
int ChainId { get; }
/// <summary>
/// Genesis block or <value>null</value> if genesis has not been processed yet
/// </summary>
BlockHeader Genesis { get; }
/// <summary>
/// Best header that has been suggested
/// </summary>
BlockHeader BestSuggestedHeader { get; }
/// <summary>
/// Best block that has been suggested for processing
/// </summary>
Block BestSuggestedBody { get; }
/// <summary>
/// Lowest header added in reverse insert
/// </summary>
BlockHeader LowestInsertedHeader { get; }
/// <summary>
/// Lowest header added in reverse insert
/// </summary>
Block LowestInsertedBody { get; }
/// <summary>
/// Best downloaded block number
/// </summary>
long BestKnownNumber { get; }
/// <summary>
/// Inserts a disconnected block header (without body)
/// </summary>
/// <param name="header">Header to add</param>
/// <returns>Result of the operation, eg. Added, AlreadyKnown, etc.</returns>
AddBlockResult Insert(BlockHeader header);
/// <summary>
/// Inserts a disconnected block body
/// </summary>
/// <param name="block">Block to add</param>
/// <returns>Result of the operation, eg. Added, AlreadyKnown, etc.</returns>
AddBlockResult Insert(Block block);
void Insert(IEnumerable<Block> blocks);
/// <summary>
/// Suggests block for inclusion in the block tree.
/// </summary>
/// <param name="block">Block to be included</param>
/// <param name="shouldProcess">Whether a block should be processed or just added to the store</param>
/// <returns>Result of the operation, eg. Added, AlreadyKnown, etc.</returns>
AddBlockResult SuggestBlock(Block block, bool shouldProcess = true);
/// <summary>
/// Suggests a block header (without body)
/// </summary>
/// <param name="header">Header to add</param>
/// <returns>Result of the operation, eg. Added, AlreadyKnown, etc.</returns>
AddBlockResult SuggestHeader(BlockHeader header);
/// <summary>
/// Checks if the block was downloaded and the block RLP is in the DB
/// </summary>
/// <param name="number">Number of the block to check (needed for faster lookup)</param>
/// <param name="blockHash">Hash of the block to check</param>
/// <returns><value>True</value> if known, otherwise <value>False</value></returns>
bool IsKnownBlock(long number, Keccak blockHash);
/// <summary>
/// Checks if the state changes of the block can be found in the state tree.
/// </summary>
/// <param name="number">Number of the block to check (needed for faster lookup)</param>
/// <param name="blockHash">Hash of the block to check</param>
/// <returns><value>True</value> if processed, otherwise <value>False</value></returns>
bool WasProcessed(long number, Keccak blockHash);
/// <summary>
        /// Marks all <paramref name="processedBlocks"/> as processed, changes chain head to the last of them and updates all the chain levels.
/// </summary>
/// <param name="processedBlocks">Blocks that will now be at the top of the chain</param>
/// <param name="wereProcessed"></param>
void UpdateMainChain(Block[] processedBlocks, bool wereProcessed);
bool CanAcceptNewBlocks { get; }
Task LoadBlocksFromDb(CancellationToken cancellationToken, long? startBlockNumber, int batchSize = BlockTree.DbLoadBatchSize, int maxBlocksToLoad = int.MaxValue);
Task FixFastSyncGaps(CancellationToken cancellationToken);
ChainLevelInfo FindLevel(long number);
Keccak FindHash(long blockNumber);
BlockHeader[] FindHeaders(Keccak hash, int numberOfBlocks, int skip, bool reverse);
void DeleteInvalidBlock(Block invalidBlock);
event EventHandler<BlockEventArgs> NewBestSuggestedBlock;
event EventHandler<BlockEventArgs> BlockAddedToMain;
event EventHandler<BlockEventArgs> NewHeadBlock;
int DeleteChainSlice(in long startNumber, long? endNumber = null);
}
} | 1 | 23,460 | BlockTree alredy has a polluted API, this one seems very internal | NethermindEth-nethermind | .cs |
@@ -123,6 +123,7 @@ describe "BoltSpec::Run", ssh: true do
describe 'apply_manifest' do
it 'should apply a manifest file' do
+ bolt_inventory['features'] = ['puppet-agent']
with_tempfile_containing('manifest', "notify { 'hello world': }", '.pp') do |manifest|
results = apply_manifest(manifest.path, 'ssh')
results.each do |result| | 1 | # frozen_string_literal: true
require 'spec_helper'
require 'bolt_spec/conn'
require 'bolt_spec/run'
require 'bolt_spec/files'
# In order to speed up tests there are only ssh versions of these specs
# While the target shouldn't matter this does mean this helper is not tested on
# windows controllers.
describe "BoltSpec::Run", ssh: true do
include BoltSpec::Run
include BoltSpec::Conn
include BoltSpec::Files
let(:modulepath) { File.join(__dir__, '../fixtures/modules') }
let(:bolt_config) {
{ "modulepath" => modulepath,
"ssh" => { "host-key-check" => false },
"winrm" => { "ssl" => false } }
}
let(:bolt_inventory) { conn_inventory }
describe 'run_task' do
it 'should run a task on a node' do
result = run_task('sample::echo', 'ssh', {})
expect(result[0]['status']).to eq('success')
end
it 'should accept _catch_errors' do
result = run_task('sample::echo', 'non_existent_node', '_catch_errors' => true)
expect(result[0]['status']).to eq('failure')
expect(result[0]['result']['_error']['kind']).to eq('puppetlabs.tasks/connect-error')
end
end
describe 'run_command' do
it 'should run a command on a node', ssh: true do
result = run_command('echo hello', 'ssh')
expect(result[0]['status']).to eq('success')
end
it 'should accept _catch_errors' do
result = run_command('echo hello', 'non_existent_node', options: { catch_errors: true })
expect(result[0]['status']).to eq('failure')
expect(result[0]['result']['_error']['kind']).to eq('puppetlabs.tasks/connect-error')
end
end
describe 'run_script' do
let(:script) { File.join(bolt_config['modulepath'], '..', 'scripts', 'success.sh') }
    it 'should run a script on a node with an argument', ssh: true do
result = run_script(script, 'ssh', ['hi'])
expect(result[0]['status']).to eq('success')
expect(result[0]['result']['stdout']).to match(/arg: hi/)
end
it 'should accept _catch_errors' do
result = run_script('missing.sh', 'non_existent_node', nil, options: { catch_errors: true })
expect(result[0]['status']).to eq('failure')
expect(result[0]['result']['_error']['kind']).to eq('puppetlabs.tasks/connect-error')
end
end
describe 'upload_file' do
let(:file) { File.join(bolt_config['modulepath'], '..', 'scripts', 'success.sh') }
let(:dest) { "/tmp/#{SecureRandom.hex}" }
it 'should upload a file to a node', ssh: true do
result = upload_file(file, dest, 'ssh')
expect(result[0]['status']).to eq('success')
end
it 'should accept _catch_errors' do
result = run_script('missing.sh', 'non_existent_node', nil, options: { catch_errors: true })
expect(result[0]['status']).to eq('failure')
expect(result[0]['result']['_error']['kind']).to eq('puppetlabs.tasks/connect-error')
end
end
describe 'run_plan' do
it 'should run a plan' do
result = run_plan('sample::single_task', 'nodes' => 'ssh')
expect(result['status']).to eq('success')
data = result['value'][0]
expect(data['status']).to eq('success')
end
it 'should return a failure' do
result = run_plan('error::run_fail', 'target' => 'ssh')
expect(result['status']).to eq('failure')
expect(result['value']['kind']).to eq('bolt/run-failure')
end
end
context 'with a target that has a puppet-agent installed' do
def root_config
{ 'ssh' => {
'run-as' => 'root',
'sudo-password' => conn_info('ssh')[:password],
'host-key-check' => false
} }
end
before(:all) do
result = run_task('puppet_agent::version', 'ssh', {}, inventory: conn_inventory, config: root_config)
expect(result.first['status']).to eq('success')
unless result.first['result']['version']
result = run_task('puppet_agent::install', 'ssh', {}, inventory: conn_inventory, config: root_config)
end
expect(result.first['status']).to eq('success')
end
after(:all) do
uninstall = '/opt/puppetlabs/bin/puppet resource package puppet-agent ensure=absent'
run_command(uninstall, 'ssh', inventory: conn_inventory, config: root_config)
end
describe 'apply_manifest' do
it 'should apply a manifest file' do
with_tempfile_containing('manifest', "notify { 'hello world': }", '.pp') do |manifest|
results = apply_manifest(manifest.path, 'ssh')
results.each do |result|
expect(result['status']).to eq('success')
expect(result.dig('result', 'report', 'resource_statuses')).to include('Notify[hello world]')
end
end
end
it 'should apply a manifest code block' do
results = apply_manifest("notify { 'hello world': }", 'ssh', execute: true)
results.each do |result|
expect(result['status']).to eq('success')
expect(result.dig('result', 'report', 'resource_statuses')).to include('Notify[hello world]')
end
end
it 'should raise an error when manifest file does not exist' do
expect do
apply_manifest("missing.na", 'ssh')
end.to raise_error(Bolt::FileError)
end
it 'should return a failure' do
results = apply_manifest("fail()", 'ssh', execute: true)
results.each do |result|
expect(result['status']).to eq('failure')
expect(result.dig('result', '_error', 'kind')).to eq('bolt/apply-error')
end
end
end
end
end
| 1 | 13,620 | Sorry, I realize my line numbers were probably off after you deleted your variables! I meant for this to go in the `before(:all) do` and `after(:all) do` blocks. | puppetlabs-bolt | rb |
@@ -195,7 +195,9 @@ public class ToParentBlockJoinQuery extends Query {
return null;
}
}
- return MatchesUtils.MATCH_WITH_NO_TERMS;
+
+ // TODO: which fields should be here?
+ return MatchesUtils.matchWithNoTerms(getQuery());
}
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search.join;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Locale;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FilterWeight;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Matches;
import org.apache.lucene.search.MatchesUtils;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitSet;
import static org.apache.lucene.search.ScoreMode.COMPLETE;
/**
* This query requires that you index
* children and parent docs as a single block, using the
* {@link IndexWriter#addDocuments IndexWriter.addDocuments()} or {@link
* IndexWriter#updateDocuments IndexWriter.updateDocuments()} API. In each block, the
* child documents must appear first, ending with the parent
* document. At search time you provide a Filter
* identifying the parents, however this Filter must provide
* an {@link BitSet} per sub-reader.
*
* <p>Once the block index is built, use this query to wrap
* any sub-query matching only child docs and join matches in that
* child document space up to the parent document space.
* You can then use this Query as a clause with
* other queries in the parent document space.</p>
*
* <p>See {@link ToChildBlockJoinQuery} if you need to join
* in the reverse order.
*
* <p>The child documents must be orthogonal to the parent
* documents: the wrapped child query must never
* return a parent document.</p>
*
* <p>See {@link org.apache.lucene.search.join} for an
* overview. </p>
*
* @lucene.experimental
*/
public class ToParentBlockJoinQuery extends Query {
private final BitSetProducer parentsFilter;
private final Query childQuery;
private final ScoreMode scoreMode;
/** Create a ToParentBlockJoinQuery.
*
* @param childQuery Query matching child documents.
* @param parentsFilter Filter identifying the parent documents.
* @param scoreMode How to aggregate multiple child scores
* into a single parent score.
**/
public ToParentBlockJoinQuery(Query childQuery, BitSetProducer parentsFilter, ScoreMode scoreMode) {
super();
this.childQuery = childQuery;
this.parentsFilter = parentsFilter;
this.scoreMode = scoreMode;
}
@Override
public void visit(QueryVisitor visitor) {
visitor.visitLeaf(this);
}
@Override
public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode weightScoreMode, float boost) throws IOException {
ScoreMode childScoreMode = weightScoreMode.needsScores() ? scoreMode : ScoreMode.None;
final Weight childWeight;
if (childScoreMode == ScoreMode.None) {
// we don't need to compute a score for the child query so we wrap
// it under a constant score query that can early terminate if the
// minimum score is greater than 0 and the total hits that match the
// query is not requested.
childWeight = searcher.rewrite(new ConstantScoreQuery(childQuery)).createWeight(searcher, weightScoreMode, 0f);
} else {
// if the score is needed we force the collection mode to COMPLETE because the child query cannot skip
// non-competitive documents.
childWeight = childQuery.createWeight(searcher, weightScoreMode.needsScores() ? COMPLETE : weightScoreMode, boost);
}
return new BlockJoinWeight(this, childWeight, parentsFilter, childScoreMode);
}
/** Return our child query. */
public Query getChildQuery() {
return childQuery;
}
private static class BlockJoinWeight extends FilterWeight {
private final BitSetProducer parentsFilter;
private final ScoreMode scoreMode;
public BlockJoinWeight(Query joinQuery, Weight childWeight, BitSetProducer parentsFilter, ScoreMode scoreMode) {
super(joinQuery, childWeight);
this.parentsFilter = parentsFilter;
this.scoreMode = scoreMode;
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
final ScorerSupplier scorerSupplier = scorerSupplier(context);
if (scorerSupplier == null) {
return null;
}
return scorerSupplier.get(Long.MAX_VALUE);
}
// NOTE: acceptDocs applies (and is checked) only in the
// parent document space
@Override
public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
final ScorerSupplier childScorerSupplier = in.scorerSupplier(context);
if (childScorerSupplier == null) {
return null;
}
// NOTE: this does not take accept docs into account, the responsibility
// to not match deleted docs is on the scorer
final BitSet parents = parentsFilter.getBitSet(context);
if (parents == null) {
// No matches
return null;
}
return new ScorerSupplier() {
@Override
public Scorer get(long leadCost) throws IOException {
return new BlockJoinScorer(BlockJoinWeight.this, childScorerSupplier.get(leadCost), parents, scoreMode);
}
@Override
public long cost() {
return childScorerSupplier.cost();
}
};
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
BlockJoinScorer scorer = (BlockJoinScorer) scorer(context);
if (scorer != null && scorer.iterator().advance(doc) == doc) {
return scorer.explain(context, in);
}
return Explanation.noMatch("Not a match");
}
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
// The default implementation would delegate to the joinQuery's Weight, which
// matches on children. We need to match on the parent instead
Scorer scorer = scorer(context);
if (scorer == null) {
return null;
}
final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
if (twoPhase == null) {
if (scorer.iterator().advance(doc) != doc) {
return null;
}
}
else {
if (twoPhase.approximation().advance(doc) != doc || twoPhase.matches() == false) {
return null;
}
}
return MatchesUtils.MATCH_WITH_NO_TERMS;
}
}
private static class ParentApproximation extends DocIdSetIterator {
private final DocIdSetIterator childApproximation;
private final BitSet parentBits;
private int doc = -1;
ParentApproximation(DocIdSetIterator childApproximation, BitSet parentBits) {
this.childApproximation = childApproximation;
this.parentBits = parentBits;
}
@Override
public int docID() {
return doc;
}
@Override
public int nextDoc() throws IOException {
return advance(doc + 1);
}
@Override
public int advance(int target) throws IOException {
if (target >= parentBits.length()) {
return doc = NO_MORE_DOCS;
}
final int firstChildTarget = target == 0 ? 0 : parentBits.prevSetBit(target - 1) + 1;
int childDoc = childApproximation.docID();
if (childDoc < firstChildTarget) {
childDoc = childApproximation.advance(firstChildTarget);
}
if (childDoc >= parentBits.length() - 1) {
return doc = NO_MORE_DOCS;
}
return doc = parentBits.nextSetBit(childDoc + 1);
}
@Override
public long cost() {
return childApproximation.cost();
}
}
private static class ParentTwoPhase extends TwoPhaseIterator {
private final ParentApproximation parentApproximation;
private final DocIdSetIterator childApproximation;
private final TwoPhaseIterator childTwoPhase;
ParentTwoPhase(ParentApproximation parentApproximation, TwoPhaseIterator childTwoPhase) {
super(parentApproximation);
this.parentApproximation = parentApproximation;
this.childApproximation = childTwoPhase.approximation();
this.childTwoPhase = childTwoPhase;
}
@Override
public boolean matches() throws IOException {
assert childApproximation.docID() < parentApproximation.docID();
do {
if (childTwoPhase.matches()) {
return true;
}
} while (childApproximation.nextDoc() < parentApproximation.docID());
return false;
}
@Override
public float matchCost() {
// TODO: how could we compute a match cost?
return childTwoPhase.matchCost() + 10;
}
}
static class BlockJoinScorer extends Scorer {
private final Scorer childScorer;
private final BitSet parentBits;
private final ScoreMode scoreMode;
private final DocIdSetIterator childApproximation;
private final TwoPhaseIterator childTwoPhase;
private final ParentApproximation parentApproximation;
private final ParentTwoPhase parentTwoPhase;
private float score;
public BlockJoinScorer(Weight weight, Scorer childScorer, BitSet parentBits, ScoreMode scoreMode) {
super(weight);
//System.out.println("Q.init firstChildDoc=" + firstChildDoc);
this.parentBits = parentBits;
this.childScorer = childScorer;
this.scoreMode = scoreMode;
childTwoPhase = childScorer.twoPhaseIterator();
if (childTwoPhase == null) {
childApproximation = childScorer.iterator();
parentApproximation = new ParentApproximation(childApproximation, parentBits);
parentTwoPhase = null;
} else {
childApproximation = childTwoPhase.approximation();
parentApproximation = new ParentApproximation(childTwoPhase.approximation(), parentBits);
parentTwoPhase = new ParentTwoPhase(parentApproximation, childTwoPhase);
}
}
@Override
public Collection<ChildScorable> getChildren() {
return Collections.singleton(new ChildScorable(childScorer, "BLOCK_JOIN"));
}
@Override
public DocIdSetIterator iterator() {
if (parentTwoPhase == null) {
// the approximation is exact
return parentApproximation;
} else {
return TwoPhaseIterator.asDocIdSetIterator(parentTwoPhase);
}
}
@Override
public TwoPhaseIterator twoPhaseIterator() {
return parentTwoPhase;
}
@Override
public int docID() {
return parentApproximation.docID();
}
@Override
public float score() throws IOException {
setScoreAndFreq();
return score;
}
@Override
public float getMaxScore(int upTo) throws IOException {
if (scoreMode == ScoreMode.None) {
return childScorer.getMaxScore(upTo);
}
return Float.POSITIVE_INFINITY;
}
@Override
public void setMinCompetitiveScore(float minScore) throws IOException {
if (scoreMode == ScoreMode.None) {
childScorer.setMinCompetitiveScore(minScore);
}
}
private void setScoreAndFreq() throws IOException {
if (childApproximation.docID() >= parentApproximation.docID()) {
return;
}
double score = scoreMode == ScoreMode.None ? 0 : childScorer.score();
int freq = 1;
while (childApproximation.nextDoc() < parentApproximation.docID()) {
if (childTwoPhase == null || childTwoPhase.matches()) {
final float childScore = scoreMode == ScoreMode.None ? 0 : childScorer.score();
freq += 1;
switch (scoreMode) {
case Total:
case Avg:
score += childScore;
break;
case Min:
score = Math.min(score, childScore);
break;
case Max:
score = Math.max(score, childScore);
break;
case None:
break;
default:
throw new AssertionError();
}
}
}
if (childApproximation.docID() == parentApproximation.docID() && (childTwoPhase == null || childTwoPhase.matches())) {
throw new IllegalStateException("Child query must not match same docs with parent filter. "
+ "Combine them as must clauses (+) to find a problem doc. "
+ "docId=" + parentApproximation.docID() + ", " + childScorer.getClass());
}
if (scoreMode == ScoreMode.Avg) {
score /= freq;
}
this.score = (float) score;
}
public Explanation explain(LeafReaderContext context, Weight childWeight) throws IOException {
int prevParentDoc = parentBits.prevSetBit(parentApproximation.docID() - 1);
int start = context.docBase + prevParentDoc + 1; // +1 b/c prevParentDoc is previous parent doc
int end = context.docBase + parentApproximation.docID() - 1; // -1 b/c parentDoc is parent doc
Explanation bestChild = null;
int matches = 0;
for (int childDoc = start; childDoc <= end; childDoc++) {
Explanation child = childWeight.explain(context, childDoc - context.docBase);
if (child.isMatch()) {
matches++;
if (bestChild == null || child.getValue().floatValue() > bestChild.getValue().floatValue()) {
bestChild = child;
}
}
}
return Explanation.match(score(), String.format(Locale.ROOT,
"Score based on %d child docs in range from %d to %d, best match:", matches, start, end), bestChild
);
}
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
final Query childRewrite = childQuery.rewrite(reader);
if (childRewrite != childQuery) {
return new ToParentBlockJoinQuery(childRewrite,
parentsFilter,
scoreMode);
} else {
return super.rewrite(reader);
}
}
@Override
public String toString(String field) {
return "ToParentBlockJoinQuery ("+childQuery.toString()+")";
}
@Override
public boolean equals(Object other) {
return sameClassAs(other) &&
equalsTo(getClass().cast(other));
}
private boolean equalsTo(ToParentBlockJoinQuery other) {
return childQuery.equals(other.childQuery) &&
parentsFilter.equals(other.parentsFilter) &&
scoreMode == other.scoreMode;
}
@Override
public int hashCode() {
final int prime = 31;
int hash = classHash();
hash = prime * hash + childQuery.hashCode();
hash = prime * hash + scoreMode.hashCode();
hash = prime * hash + parentsFilter.hashCode();
return hash;
}
}
| 1 | 35,832 | This bit I wasn't really sure about. | apache-lucene-solr | java |
@@ -87,6 +87,7 @@ function buildRules(grunt, options, commons, callback) {
var tags = options.tags ? options.tags.split(/\s*,\s*/) : [];
var rules = result.rules;
var checks = result.checks;
+ parseChecks(checks);
// Translate checks
if (locale && locale.checks) { | 1 | /*eslint-env node */
/*eslint max-len: off */
'use strict';
var clone = require('clone');
var dot = require('@deque/dot');
var templates = require('./templates');
var buildManual = require('./build-manual');
var entities = new (require('html-entities').AllHtmlEntities)();
var packageJSON = require('../package.json');
var dotRegex = /\{\{.+?\}\}/g;
var axeVersion = packageJSON.version.substring(
0,
packageJSON.version.lastIndexOf('.')
);
var descriptionTableHeader =
'| Rule ID | Description | Impact | Tags | Issue Type |\n| :------- | :------- | :------- | :------- | :------- |\n';
dot.templateSettings.strip = false;
function getLocale(grunt, options) {
var localeFile;
if (options.locale) {
localeFile = './locales/' + options.locale + '.json';
}
if (localeFile) {
return grunt.file.readJSON(localeFile);
}
}
function makeHeaderLink(title) {
return title
.replace(/ /g, '-')
.replace(/[\.&]/g, '')
.toLowerCase();
}
function buildRules(grunt, options, commons, callback) {
var axeImpact = Object.freeze(['minor', 'moderate', 'serious', 'critical']); // TODO: require('../axe') does not work if grunt configure is moved after uglify, npm test breaks with undefined. Complicated grunt concurrency issue.
var locale = getLocale(grunt, options);
options.getFiles = false;
buildManual(grunt, options, commons, function(result) {
var metadata = {
rules: {},
checks: {}
};
var descriptions = {
wcag20: {
title: 'WCAG 2.0 Level A & AA Rules',
rules: []
},
wcag21: {
title: 'WCAG 2.1 Level A & AA Rules',
rules: []
},
bestPractice: {
title: 'Best Practices Rules',
intro:
'Rules that do not necessarily conform to WCAG success criterion but are industry accepted practices that improve the user experience.',
rules: []
},
experimental: {
title: 'Experimental Rules',
intro:
'Rules we are still testing and developing. They are not enabled by default in axe-core, but are enabled for the axe browser extensions.',
rules: []
},
deprecated: {
title: 'Deprecated Rules',
intro:
'Deprecated rules are not enabled by default and will be removed in the next major release.',
rules: []
}
};
var TOC = Object.keys(descriptions)
.map(key => {
return `- [${descriptions[key].title}](#${makeHeaderLink(
descriptions[key].title
)})`;
})
.join('\n');
var tags = options.tags ? options.tags.split(/\s*,\s*/) : [];
var rules = result.rules;
var checks = result.checks;
// Translate checks
if (locale && locale.checks) {
checks.forEach(function(check) {
if (locale.checks[check.id] && check.metadata) {
check.metadata.messages = locale.checks[check.id];
}
});
}
function parseMetaData(source, propType) {
var data = source.metadata;
var key = source.id || source.type;
if (key && locale && locale[propType] && propType !== 'checks') {
data = locale[propType][key] || data;
}
var result = clone(data) || {};
if (result.messages) {
Object.keys(result.messages).forEach(function(key) {
// only convert to templated function for strings
// objects handled later in publish-metadata.js
if (
typeof result.messages[key] !== 'object' &&
dotRegex.test(result.messages[key])
) {
result.messages[key] = dot
.template(result.messages[key])
.toString();
}
});
}
//TODO this is actually failureSummaries, property name should better reflect that
if (result.failureMessage && dotRegex.test(result.failureMessage)) {
result.failureMessage = dot.template(result.failureMessage).toString();
}
return result;
}
function createFailureSummaryObject(summaries) {
var result = {};
summaries.forEach(function(summary) {
if (summary.type) {
result[summary.type] = parseMetaData(summary, 'failureSummaries');
}
});
return result;
}
function getIncompleteMsg(summaries) {
var result = {};
summaries.forEach(function(summary) {
if (
summary.incompleteFallbackMessage &&
dotRegex.test(summary.incompleteFallbackMessage)
) {
result = dot.template(summary.incompleteFallbackMessage).toString();
}
});
return result;
}
function replaceFunctions(string) {
return string
.replace(
/"(evaluate|after|gather|matches|source|commons)":\s*("[^"]+?.js")/g,
function(m, p1, p2) {
return m.replace(p2, getSource(p2.replace(/^"|"$/g, ''), p1));
}
)
.replace(/"(function anonymous\([\s\S]+?\) {)([\s\S]+?)(})"/g, function(
m
) {
return JSON.parse(m);
})
.replace(/"(\(function \(\) {)([\s\S]+?)(}\)\(\))"/g, function(m) {
return JSON.parse(m);
});
}
function getSource(file, type) {
return grunt.template.process(templates[type], {
data: {
source: grunt.file.read(file)
}
});
}
function findCheck(checks, id) {
return checks.filter(function(check) {
if (check.id === id) {
return true;
}
})[0];
}
function blacklist(k, v) {
if (options.blacklist.indexOf(k) !== -1) {
return undefined;
}
return v;
}
function parseChecks(collection) {
return collection.map(function(check) {
var c = {};
var id = typeof check === 'string' ? check : check.id;
var definition = clone(findCheck(checks, id));
if (!definition) {
grunt.log.error('check ' + id + ' not found');
}
c.options = check.options || definition.options;
c.id = id;
if (definition.metadata && !metadata.checks[id]) {
metadata.checks[id] = parseMetaData(definition, 'checks');
}
return c.options === undefined ? id : c;
});
}
function traverseChecks(checkCollection, predicate, startValue) {
return checkCollection.reduce(function(out, check) {
var id = typeof check === 'string' ? check : check.id;
var definition = clone(findCheck(checks, id));
if (!definition) {
grunt.log.error('check ' + id + ' not found');
}
return predicate(definition, out);
}, startValue);
}
function parseImpactForRule(rule) {
function capitalize(s) {
return s.charAt(0).toUpperCase() + s.slice(1);
}
if (rule.impact) {
return capitalize(rule.impact);
}
function getUniqueArr(arr) {
return arr.filter(function(value, index, self) {
return self.indexOf(value) === index;
});
}
function getImpactScores(definition, out) {
if (definition && definition.metadata && definition.metadata.impact) {
var impactScore = axeImpact.indexOf(definition.metadata.impact);
out.push(impactScore);
}
return out;
}
function getScore(checkCollection, onlyHighestScore) {
var scores = traverseChecks(checkCollection, getImpactScores, []);
if (scores && scores.length) {
return onlyHighestScore
? [Math.max.apply(null, scores)]
: getUniqueArr(scores);
} else {
return [];
}
}
var highestImpactForRuleTypeAny = getScore(rule.any, true);
var allUniqueImpactsForRuleTypeAll = getScore(rule.all, false);
var allUniqueImpactsForRuleTypeNone = getScore(rule.none, false);
var cumulativeImpacts = highestImpactForRuleTypeAny
.concat(allUniqueImpactsForRuleTypeAll)
.concat(allUniqueImpactsForRuleTypeNone);
var cumulativeScores = getUniqueArr(cumulativeImpacts).sort(); //order lowest to highest
return cumulativeScores.reduce(function(out, cV) {
return out.length
? out + ', ' + capitalize(axeImpact[cV])
: capitalize(axeImpact[cV]);
}, '');
}
function parseFailureForRule(rule) {
function hasFailure(definition, out) {
if (definition && definition.metadata && definition.metadata.impact) {
out = out || !!definition.metadata.messages.fail;
}
return out;
}
return (
traverseChecks(rule.any, hasFailure, false) ||
traverseChecks(rule.all, hasFailure, false) ||
traverseChecks(rule.none, hasFailure, false)
);
}
function parseIncompleteForRule(rule) {
function hasIncomplete(definition, out) {
if (definition && definition.metadata && definition.metadata.impact) {
out = out || !!definition.metadata.messages.incomplete;
}
return out;
}
return (
traverseChecks(rule.any, hasIncomplete, false) ||
traverseChecks(rule.all, hasIncomplete, false) ||
traverseChecks(rule.none, hasIncomplete, false)
);
}
rules.map(function(rule) {
var impact = parseImpactForRule(rule);
var canFail = parseFailureForRule(rule);
var canIncomplete = parseIncompleteForRule(rule);
rule.any = parseChecks(rule.any);
rule.all = parseChecks(rule.all);
rule.none = parseChecks(rule.none);
if (rule.metadata && !metadata.rules[rule.id]) {
metadata.rules[rule.id] = parseMetaData(rule, 'rules'); // Translate rules
}
var rules;
if (rule.tags.includes('deprecated')) {
rules = descriptions.deprecated.rules;
} else if (rule.tags.includes('experimental')) {
rules = descriptions.experimental.rules;
} else if (rule.tags.includes('best-practice')) {
rules = descriptions.bestPractice.rules;
} else if (rule.tags.find(tag => tag.startsWith('wcag2a'))) {
rules = descriptions.wcag20.rules;
} else {
rules = descriptions.wcag21.rules;
}
var issueType = [];
if (canFail) {
issueType.push('failure');
}
if (canIncomplete) {
issueType.push('needs review');
}
rules.push([
`[${rule.id}](https://dequeuniversity.com/rules/axe/${axeVersion}/${rule.id}?application=RuleDescription)`,
entities.encode(rule.metadata.description),
impact,
rule.tags.join(', '),
issueType.join(', ')
]);
if (tags.length) {
rule.enabled = !!rule.tags.filter(function(t) {
return tags.indexOf(t) !== -1;
}).length;
}
return rule;
});
var ruleTables = Object.keys(descriptions)
.map(key => {
var description = descriptions[key];
return `
## ${description.title}
${description.intro ? description.intro : ''}
${
description.rules.length
? descriptionTableHeader
: '_There are no matching rules_'
}${description.rules
.map(function(row) {
return '| ' + row.join(' | ') + ' |';
})
.join('\n')}`;
})
.join('\n\n');
var descriptions = `
# Rule Descriptions
## Table of Contents
${TOC}
${ruleTables}`;
// Translate failureSummaries
metadata.failureSummaries = createFailureSummaryObject(result.misc);
metadata.incompleteFallbackMessage = getIncompleteMsg(result.misc);
callback({
auto: replaceFunctions(
JSON.stringify(
{
lang: options.locale || 'en',
data: metadata,
rules: rules,
checks: checks
},
blacklist
)
),
manual: replaceFunctions(
JSON.stringify(
{
data: metadata,
rules: rules,
checks: checks
},
blacklist
)
),
descriptions
});
});
}
module.exports = buildRules;
| 1 | 15,884 | From the code, a check's metatdata was only added to `axe._load` if a rule used it. Since `role-none` and `role-presentation` were no longer used in any rule, their metadata was never added. This caused any translation file that passed translations for those checks to fail `axe.configure` with > "Locale provided for unknown check: "role-none"" To fix I added this line to parse all checks regardless of if a rule uses it or not. | dequelabs-axe-core | js |
@@ -740,7 +740,7 @@ class Package:
gc.enable()
return pkg
- def set_dir(self, lkey, path=None, meta=None):
+ def set_dir(self, lkey, path=None, meta=None, update_policy="incoming"):
"""
Adds all files from `path` to the package.
| 1 | import inspect
from collections import deque
import gc
import hashlib
import io
import json
import pathlib
import os
import shutil
import time
from multiprocessing import Pool
import uuid
import warnings
import jsonlines
from tqdm import tqdm
from .backends import get_package_registry
from .data_transfer import (
calculate_sha256, copy_file, copy_file_list, get_bytes, get_size_and_version,
list_object_versions, list_url, put_bytes
)
from .exceptions import PackageException
from .formats import FormatRegistry
from .telemetry import ApiTelemetry
from .util import (
QuiltException, fix_url, get_from_config, get_install_location,
validate_package_name, quiltignore_filter, validate_key, extract_file_extension,
parse_sub_package_name, RemovedInQuilt4Warning)
from .util import CACHE_PATH, TEMPFILE_DIR_PATH as APP_DIR_TEMPFILE_DIR, PhysicalKey, \
user_is_configured_to_custom_stack, catalog_package_url, DISABLE_TQDM
def hash_file(readable_file):
""" Returns SHA256 hash of readable file-like object """
buf = readable_file.read(4096)
hasher = hashlib.sha256()
while buf:
hasher.update(buf)
buf = readable_file.read(4096)
return hasher.hexdigest()
def _delete_local_physical_key(pk):
assert pk.is_local(), "This function only works on files that live on a local disk"
pathlib.Path(pk.path).unlink()
def _filesystem_safe_encode(key):
"""Returns the sha256 of the key. This ensures there are no slashes, uppercase/lowercase conflicts,
avoids `OSError: [Errno 36] File name too long:`, etc."""
return hashlib.sha256(key.encode()).hexdigest()
class ObjectPathCache:
@classmethod
def _cache_path(cls, url):
url_hash = _filesystem_safe_encode(url)
return CACHE_PATH / url_hash[0:2] / url_hash[2:]
@classmethod
def get(cls, url):
cache_path = cls._cache_path(url)
try:
with open(cache_path) as fd:
path, dev, ino, mtime = json.load(fd)
except (FileNotFoundError, ValueError):
return None
try:
stat = pathlib.Path(path).stat()
except FileNotFoundError:
return None
# check if device, file, and timestamp are unchanged => cache hit
# see also https://docs.python.org/3/library/os.html#os.stat_result
if stat.st_dev == dev and stat.st_ino == ino and stat.st_mtime_ns == mtime:
return path
else:
return None
@classmethod
def set(cls, url, path):
stat = pathlib.Path(path).stat()
cache_path = cls._cache_path(url)
cache_path.parent.mkdir(parents=True, exist_ok=True)
with open(cache_path, 'w') as fd:
json.dump([path, stat.st_dev, stat.st_ino, stat.st_mtime_ns], fd)
@classmethod
def clear(cls):
shutil.rmtree(CACHE_PATH)
class PackageEntry:
"""
Represents an entry at a logical key inside a package.
"""
__slots__ = ['physical_key', 'size', 'hash', '_meta']
def __init__(self, physical_key, size, hash_obj, meta):
"""
Creates an entry.
Args:
physical_key: a URI (either `s3://` or `file://`)
size(number): size of object in bytes
hash({'type': string, 'value': string}): hash object
for example: {'type': 'SHA256', 'value': 'bb08a...'}
meta(dict): metadata dictionary
Returns:
a PackageEntry
"""
assert isinstance(physical_key, PhysicalKey)
self.physical_key = physical_key
self.size = size
self.hash = hash_obj
self._meta = meta or {}
def __eq__(self, other):
return (
# Don't check physical keys.
self.size == other.size
and self.hash == other.hash
and self._meta == other._meta
)
def __repr__(self):
return f"PackageEntry('{self.physical_key}')"
def as_dict(self):
"""
Returns dict representation of entry.
"""
return {
'physical_keys': [str(self.physical_key)],
'size': self.size,
'hash': self.hash,
'meta': self._meta
}
@property
def meta(self):
return self._meta.get('user_meta', dict())
def set_meta(self, meta):
"""
Sets the user_meta for this PackageEntry.
"""
self._meta['user_meta'] = meta
def _verify_hash(self, read_bytes):
"""
Verifies hash of bytes
"""
if self.hash is None:
raise QuiltException("Hash missing - need to build the package")
if self.hash.get('type') != 'SHA256':
raise NotImplementedError
digest = hashlib.sha256(read_bytes).hexdigest()
if digest != self.hash.get('value'):
raise QuiltException("Hash validation failed")
def set(self, path=None, meta=None):
"""
Returns self with the physical key set to path.
Args:
logical_key(string): logical key to update
path(string): new path to place at logical_key in the package
Currently only supports a path on local disk
meta(dict): metadata dict to attach to entry. If meta is provided, set just
updates the meta attached to logical_key without changing anything
else in the entry
Returns:
self
"""
if path is not None:
self.physical_key = PhysicalKey.from_url(fix_url(path))
self.size = None
self.hash = None
elif meta is not None:
self.set_meta(meta)
else:
raise PackageException('Must specify either path or meta')
def get(self):
"""
Returns the physical key of this PackageEntry.
"""
return str(self.physical_key)
def get_cached_path(self):
"""
Returns a locally cached physical key, if available.
"""
if not self.physical_key.is_local():
return ObjectPathCache.get(str(self.physical_key))
return None
def get_bytes(self, use_cache_if_available=True):
"""
Returns the bytes of the object this entry corresponds to. If 'use_cache_if_available'=True, will first try to
retrieve the bytes from cache.
"""
if use_cache_if_available:
cached_path = self.get_cached_path()
if cached_path is not None:
return get_bytes(PhysicalKey(None, cached_path, None))
data = get_bytes(self.physical_key)
return data
def get_as_json(self, use_cache_if_available=True):
"""
Returns a JSON file as a `dict`. Assumes that the file is encoded using utf-8.
If 'use_cache_if_available'=True, will first try to retrieve the object from cache.
"""
obj_bytes = self.get_bytes(use_cache_if_available=use_cache_if_available)
return json.loads(obj_bytes.decode("utf-8"))
def get_as_string(self, use_cache_if_available=True):
"""
Return the object as a string. Assumes that the file is encoded using utf-8.
If 'use_cache_if_available'=True, will first try to retrieve the object from cache.
"""
obj_bytes = self.get_bytes(use_cache_if_available=use_cache_if_available)
return obj_bytes.decode("utf-8")
def deserialize(self, func=None, **format_opts):
"""
Returns the object this entry corresponds to.
Args:
func: Skip normal deserialization process, and call func(bytes),
returning the result directly.
**format_opts: Some data formats may take options. Though
normally handled by metadata, these can be overridden here.
Returns:
The deserialized object from the logical_key
Raises:
physical key failure
hash verification fail
when deserialization metadata is not present
"""
data = get_bytes(self.physical_key)
if func is not None:
return func(data)
pkey_ext = pathlib.PurePosixPath(self.physical_key.path).suffix
# Verify format can be handled before checking hash. Raises if none found.
formats = FormatRegistry.search(None, self._meta, pkey_ext)
# Verify hash before deserializing..
self._verify_hash(data)
return formats[0].deserialize(data, self._meta, pkey_ext, **format_opts)
def fetch(self, dest=None):
"""
Gets objects from entry and saves them to dest.
Args:
dest: where to put the files
Defaults to the entry name
Returns:
None
"""
if dest is None:
name = self.physical_key.basename()
dest = PhysicalKey.from_path('.').join(name)
else:
dest = PhysicalKey.from_url(fix_url(dest))
copy_file(self.physical_key, dest)
        # Return an entry whose physical key is rerooted to `dest`, now that the copy operation has succeeded
# see GH#388 for context
return self.with_physical_key(dest)
def __call__(self, func=None, **kwargs):
"""
Shorthand for self.deserialize()
"""
return self.deserialize(func=func, **kwargs)
def with_physical_key(self, key):
return self.__class__(key, self.size, self.hash, self._meta)
@property
def physical_keys(self):
"""
Deprecated
"""
warnings.warn(
"PackageEntry.physical_keys is deprecated, use PackageEntry.physical_key instead.",
category=RemovedInQuilt4Warning,
stacklevel=2,
)
return [self.physical_key]
class Package:
""" In-memory representation of a package """
def __init__(self):
self._children = {}
self._meta = {'version': 'v0'}
@ApiTelemetry("package.__repr__")
def __repr__(self, max_lines=20):
"""
String representation of the Package.
"""
def _create_str(results_dict, level=0, parent=True):
"""
Creates a string from the results dict
"""
result = ''
keys = sorted(results_dict.keys())
if not keys:
return result
if parent:
has_remote_entries = any(
self._map(
lambda lk, entry: not entry.physical_key.is_local()
)
)
pkg_type = 'remote' if has_remote_entries else 'local'
result = f'({pkg_type} Package)\n'
for key in keys:
result += ' ' + (' ' * level) + '└─' + key + '\n'
result += _create_str(results_dict[key], level + 1, parent=False)
return result
if not self.keys():
return '(empty Package)'
# traverse the tree of package directories and entries to get the list of
# display objects. candidates is a deque of shape
# ((logical_key, Package | PackageEntry), [list of parent key])
candidates = deque(([x, []] for x in self._children.items()))
results_dict = {}
results_total = 0
more_objects_than_lines = False
while candidates:
[[logical_key, entry], parent_keys] = candidates.popleft()
if isinstance(entry, Package):
logical_key = logical_key + '/'
new_parent_keys = parent_keys.copy()
new_parent_keys.append(logical_key)
for child_key in sorted(entry.keys()):
candidates.append([[child_key, entry[child_key]], new_parent_keys])
current_result_level = results_dict
for key in parent_keys:
current_result_level = current_result_level[key]
current_result_level[logical_key] = {}
results_total += 1
if results_total >= max_lines:
more_objects_than_lines = True
break
repr_str = _create_str(results_dict)
# append '...' if the package is larger than max_size
if more_objects_than_lines:
repr_str += ' ' + '...\n'
return repr_str
@property
def meta(self):
return self._meta.get('user_meta', dict())
@classmethod
@ApiTelemetry("package.install")
def install(cls, name, registry=None, top_hash=None, dest=None, dest_registry=None, *, path=None):
"""
Installs a named package to the local registry and downloads its files.
Args:
name(str): Name of package to install. It also can be passed as NAME/PATH
(/PATH is deprecated, use the `path` parameter instead),
in this case only the sub-package or the entry specified by PATH will
be downloaded.
registry(str): Registry where package is located.
Defaults to the default remote registry.
top_hash(str): Hash of package to install. Defaults to latest.
dest(str): Local path to download files to.
dest_registry(str): Registry to install package to. Defaults to local registry.
path(str): If specified, downloads only `path` or its children.
"""
if registry is None:
registry = get_from_config('default_remote_registry')
if registry is None:
raise QuiltException(
"No registry specified and no default_remote_registry configured. Please "
"specify a registry or configure a default remote registry with quilt3.config"
)
else:
registry = fix_url(registry)
dest_registry = get_package_registry(dest_registry)
if not dest_registry.is_local:
raise QuiltException(
f"Can only 'install' to a local registry, but 'dest_registry' "
f"{dest_registry!r} is a remote path. To store a package in a remote "
f"registry, use 'push' or 'build' instead."
)
if dest is None:
dest_parsed = PhysicalKey.from_url(get_install_location()).join(name)
else:
dest_parsed = PhysicalKey.from_url(fix_url(dest))
if not dest_parsed.is_local():
raise QuiltException(
f"Invalid package destination path {dest!r}. 'dest', if set, must point at "
f"the local filesystem. To copy a package to a remote registry use 'push' or "
f"'build' instead."
)
parts = parse_sub_package_name(name)
if parts and parts[1]:
warnings.warn(
"Passing path via package name is deprecated, use the 'path' parameter instead.",
category=RemovedInQuilt4Warning,
stacklevel=3,
)
name, subpkg_key = parts
validate_key(subpkg_key)
if path:
raise ValueError("You must not pass path via package name and 'path' parameter.")
elif path:
validate_key(path)
subpkg_key = path
else:
subpkg_key = None
pkg = cls._browse(name=name, registry=registry, top_hash=top_hash)
message = pkg._meta.get('message', None) # propagate the package message
file_list = []
if subpkg_key is not None:
if subpkg_key not in pkg:
raise QuiltException(f"Package {name} doesn't contain {subpkg_key!r}.")
entry = pkg[subpkg_key]
entries = entry.walk() if isinstance(entry, Package) else ((subpkg_key.split('/')[-1], entry),)
else:
entries = pkg.walk()
for logical_key, entry in entries:
# Copy the datafiles in the package.
physical_key = entry.physical_key
# Try a local cache.
cached_file = ObjectPathCache.get(str(physical_key))
if cached_file is not None:
physical_key = PhysicalKey.from_path(cached_file)
new_physical_key = dest_parsed.join(logical_key)
if physical_key != new_physical_key:
file_list.append((physical_key, new_physical_key, entry.size))
def _maybe_add_to_cache(old: PhysicalKey, new: PhysicalKey, _):
if not old.is_local() and new.is_local():
ObjectPathCache.set(str(old), new.path)
copy_file_list(file_list, callback=_maybe_add_to_cache, message="Copying objects")
pkg._build(name, registry=dest_registry, message=message)
if top_hash is None:
top_hash = pkg.top_hash
short_top_hash = dest_registry.shorten_top_hash(name, top_hash)
print(f"Successfully installed package '{name}', tophash={short_top_hash} from {registry}")
@classmethod
def _parse_resolve_hash_args(cls, name, registry, hash_prefix):
return name, registry, hash_prefix
@staticmethod
def _parse_resolve_hash_args_old(registry, hash_prefix):
return None, registry, hash_prefix
@classmethod
def resolve_hash(cls, *args, **kwargs):
"""
Find a hash that starts with a given prefix.
Args:
name (str): name of package
registry (str): location of registry
hash_prefix (str): hash prefix with length between 6 and 64 characters
"""
try:
name, registry, hash_prefix = cls._parse_resolve_hash_args_old(*args, **kwargs)
except TypeError:
name, registry, hash_prefix = cls._parse_resolve_hash_args(*args, **kwargs)
validate_package_name(name)
else:
warnings.warn(
"Calling resolve_hash() without the 'name' parameter is deprecated.",
category=RemovedInQuilt4Warning,
stacklevel=2,
)
return get_package_registry(registry).resolve_top_hash(name, hash_prefix)
# This is needed for nice signature in docs.
resolve_hash.__func__.__signature__ = inspect.signature(_parse_resolve_hash_args.__func__)
@classmethod
@ApiTelemetry("package.browse")
def browse(cls, name, registry=None, top_hash=None):
"""
Load a package into memory from a registry without making a local copy of
the manifest.
Args:
name(string): name of package to load
registry(string): location of registry to load package from
top_hash(string): top hash of package version to load
"""
return cls._browse(name=name, registry=registry, top_hash=top_hash)
@classmethod
def _browse(cls, name, registry=None, top_hash=None):
validate_package_name(name)
registry = get_package_registry(registry)
top_hash = (
get_bytes(registry.pointer_latest_pk(name)).decode()
if top_hash is None else
registry.resolve_top_hash(name, top_hash)
)
pkg_manifest = registry.manifest_pk(name, top_hash)
if pkg_manifest.is_local():
local_pkg_manifest = pkg_manifest.path
else:
local_pkg_manifest = CACHE_PATH / "manifest" / _filesystem_safe_encode(str(pkg_manifest))
if not local_pkg_manifest.exists():
# Copy to a temporary file first, to make sure we don't cache a truncated file
# if the download gets interrupted.
tmp_path = local_pkg_manifest.with_suffix('.tmp')
copy_file(pkg_manifest, PhysicalKey.from_path(tmp_path), message="Downloading manifest")
tmp_path.rename(local_pkg_manifest)
return cls._from_path(local_pkg_manifest)
@classmethod
def _from_path(cls, path):
""" Takes a path and returns a package loaded from that path"""
with open(path) as open_file:
pkg = cls._load(open_file)
return pkg
@classmethod
def _split_key(cls, logical_key):
"""
Converts a string logical key like 'a/b/c' into a list of ['a', 'b', 'c'].
Returns the original key if it's already a list or a tuple.
"""
if isinstance(logical_key, str):
path = logical_key.split('/')
elif isinstance(logical_key, (tuple, list)):
path = logical_key
else:
raise TypeError('Invalid logical_key: %r' % logical_key)
return path
def __contains__(self, logical_key):
"""
Checks whether the package contains a specified logical_key.
Returns:
True or False
"""
try:
self[logical_key]
return True
except KeyError:
return False
def __getitem__(self, logical_key):
"""
Filters the package based on prefix, and returns either a new Package
or a PackageEntry.
Args:
prefix(str): prefix to filter on
Returns:
PackageEntry if prefix matches a logical_key exactly
otherwise Package
"""
pkg = self
for key_fragment in self._split_key(logical_key):
pkg = pkg._children[key_fragment]
return pkg
@ApiTelemetry("package.fetch")
def fetch(self, dest='./'):
"""
Copy all descendants to `dest`. Descendants are written under their logical
names _relative_ to self.
Args:
dest: where to put the files (locally)
Returns:
A new Package object with entries from self, but with physical keys
pointing to files in `dest`.
"""
nice_dest = PhysicalKey.from_url(fix_url(dest))
file_list = []
pkg = Package()
for logical_key, entry in self.walk():
physical_key = entry.physical_key
new_physical_key = nice_dest.join(logical_key)
file_list.append((physical_key, new_physical_key, entry.size))
            # Return a package whose physical keys are rerooted to the fetched copies once the copy operation succeeds
# see GH#388 for context
new_entry = entry.with_physical_key(new_physical_key)
pkg._set(logical_key, new_entry)
copy_file_list(file_list, message="Copying objects")
return pkg
def keys(self):
"""
Returns logical keys in the package.
"""
return self._children.keys()
def __iter__(self):
return iter(self._children)
def __len__(self):
return len(self._children)
def walk(self):
"""
Generator that traverses all entries in the package tree and returns tuples of (key, entry),
with keys in alphabetical order.
"""
for name, child in sorted(self._children.items()):
if isinstance(child, PackageEntry):
yield name, child
else:
for key, value in child.walk():
yield name + '/' + key, value
def _walk_dir_meta(self):
"""
Generator that traverses all entries in the package tree and returns
tuples of (key, meta) for each directory with metadata.
Keys will all end in '/' to indicate that they are directories.
"""
for key, child in sorted(self._children.items()):
if isinstance(child, PackageEntry):
continue
meta = child.meta
if meta:
yield key + '/', meta
for child_key, child_meta in child._walk_dir_meta():
yield key + '/' + child_key, child_meta
@classmethod
@ApiTelemetry("package.load")
def load(cls, readable_file):
"""
Loads a package from a readable file-like object.
Args:
readable_file: readable file-like object to deserialize package from
Returns:
A new Package object
Raises:
file not found
json decode error
invalid package exception
"""
return cls._load(readable_file=readable_file)
@classmethod
def _load(cls, readable_file):
gc.disable() # Experiments with COCO (650MB manifest) show disabling GC gives us ~2x performance improvement
try:
line_count = 0
for _ in readable_file:
line_count += 1
readable_file.seek(0)
reader = jsonlines.Reader(
tqdm(readable_file, desc="Loading manifest", total=line_count, unit="entries", disable=DISABLE_TQDM),
loads=json.loads,
)
meta = reader.read()
meta.pop('top_hash', None) # Obsolete as of PR #130
pkg = cls()
pkg._meta = meta
for obj in reader:
path = cls._split_key(obj.pop('logical_key'))
subpkg = pkg._ensure_subpackage(path[:-1])
key = path[-1]
if not obj.get('physical_keys', None):
# directory-level metadata
subpkg.set_meta(obj['meta'])
continue
if key in subpkg._children:
raise PackageException("Duplicate logical key while loading package")
subpkg._children[key] = PackageEntry(
PhysicalKey.from_url(obj['physical_keys'][0]),
obj['size'],
obj['hash'],
obj['meta'],
)
finally:
gc.enable()
return pkg
def set_dir(self, lkey, path=None, meta=None):
"""
Adds all files from `path` to the package.
Recursively enumerates every file in `path`, and adds them to
the package according to their relative location to `path`.
Args:
lkey(string): prefix to add to every logical key,
use '/' for the root of the package.
path(string): path to scan for files to add to package.
If None, lkey will be substituted in as the path.
meta(dict): user level metadata dict to attach to lkey directory entry.
Returns:
self
Raises:
When `path` doesn't exist
"""
lkey = lkey.strip("/")
if not lkey or lkey == '.' or lkey == './':
root = self
else:
validate_key(lkey)
root = self._ensure_subpackage(self._split_key(lkey))
root.set_meta(meta)
if path:
src = PhysicalKey.from_url(fix_url(path))
else:
src = PhysicalKey.from_path(lkey)
# TODO: deserialization metadata
if src.is_local():
src_path = pathlib.Path(src.path)
if not src_path.is_dir():
raise PackageException("The specified directory doesn't exist")
files = src_path.rglob('*')
ignore = src_path / '.quiltignore'
if ignore.exists():
files = quiltignore_filter(files, ignore, 'file')
for f in files:
if not f.is_file():
continue
entry = PackageEntry(PhysicalKey.from_path(f), f.stat().st_size, None, None)
logical_key = f.relative_to(src_path).as_posix()
root._set(logical_key, entry)
else:
if src.version_id is not None:
raise PackageException("Directories cannot have versions")
src_path = src.path
if src.basename() != '':
src_path += '/'
objects, _ = list_object_versions(src.bucket, src_path)
for obj in objects:
if not obj['IsLatest']:
continue
                # Skip S3 pseudo directory files and keys that end in /
if obj['Key'].endswith('/'):
if obj['Size'] != 0:
warnings.warn(f'Logical keys cannot end in "/", skipping: {obj["Key"]}')
continue
obj_pk = PhysicalKey(src.bucket, obj['Key'], obj.get('VersionId'))
entry = PackageEntry(obj_pk, obj['Size'], None, None)
logical_key = obj['Key'][len(src_path):]
root._set(logical_key, entry)
return self
def get(self, logical_key):
"""
Gets object from logical_key and returns its physical path.
Equivalent to self[logical_key].get().
Args:
logical_key(string): logical key of the object to get
Returns:
Physical path as a string.
Raises:
KeyError: when logical_key is not present in the package
ValueError: if the logical_key points to a Package rather than PackageEntry.
"""
obj = self[logical_key]
if not isinstance(obj, PackageEntry):
raise ValueError("Key does not point to a PackageEntry")
return obj.get()
def readme(self):
"""
Returns the README PackageEntry
The README is the entry with the logical key 'README.md' (case-sensitive). Will raise a QuiltException if
no such entry exists.
"""
if "README.md" not in self:
            ex_msg = "This Package is missing a README file. A Quilt-recognized README file is a file named " \
                     "'README.md' (case-sensitive)"
raise QuiltException(ex_msg)
return self["README.md"]
def set_meta(self, meta):
"""
Sets user metadata on this Package.
"""
self._meta['user_meta'] = meta
return self
def _fix_sha256(self):
"""
Calculate and set missing hash values
"""
self._incomplete_entries = [entry for key, entry in self.walk() if entry.hash is None]
physical_keys = []
sizes = []
for entry in self._incomplete_entries:
physical_keys.append(entry.physical_key)
sizes.append(entry.size)
results = calculate_sha256(physical_keys, sizes)
exc = None
for entry, obj_hash in zip(self._incomplete_entries, results):
if isinstance(obj_hash, Exception):
exc = obj_hash
else:
entry.hash = dict(type='SHA256', value=obj_hash)
if exc:
incomplete_manifest_path = self._dump_manifest_to_scratch()
msg = "Unable to reach S3 for some hash values. Incomplete manifest saved to {path}."
raise PackageException(msg.format(path=incomplete_manifest_path)) from exc
def _set_commit_message(self, msg):
"""
Sets a commit message.
Args:
msg: a message string
Returns:
None
Raises:
a ValueError if msg is not a string
"""
if msg is not None and not isinstance(msg, str):
raise ValueError(
f"The package commit message must be a string, but the message provided is an "
f"instance of {type(msg)}."
)
self._meta.update({'message': msg})
def _dump_manifest_to_scratch(self):
registry = get_from_config('default_local_registry')
registry_parsed = PhysicalKey.from_url(registry)
pkg_manifest_file = registry_parsed.join("scratch").join(str(int(time.time())))
manifest = io.BytesIO()
self._dump(manifest)
put_bytes(
manifest.getvalue(),
pkg_manifest_file
)
return pkg_manifest_file.path
@ApiTelemetry("package.build")
def build(self, name, registry=None, message=None):
"""
Serializes this package to a registry.
Args:
name: optional name for package
registry: registry to build to
defaults to local registry
message: the commit message of the package
Returns:
The top hash as a string.
"""
return self._build(name=name, registry=registry, message=message)
def _build(self, name, registry, message):
validate_package_name(name)
registry = get_package_registry(registry)
self._set_commit_message(message)
self._fix_sha256()
manifest = io.BytesIO()
self._dump(manifest)
top_hash = self.top_hash
registry.push_manifest(name, top_hash, manifest.getvalue())
return top_hash
@ApiTelemetry("package.dump")
def dump(self, writable_file):
"""
Serializes this package to a writable file-like object.
Args:
writable_file: file-like object to write serialized package.
Returns:
None
Raises:
fail to create file
fail to finish write
"""
return self._dump(writable_file)
def _dump(self, writable_file):
writer = jsonlines.Writer(writable_file)
for line in self.manifest:
writer.write(line)
@property
def manifest(self):
"""
Provides a generator of the dicts that make up the serialized package.
"""
yield self._meta
for dir_key, meta in self._walk_dir_meta():
yield {'logical_key': dir_key, 'meta': meta}
for logical_key, entry in self.walk():
yield {'logical_key': logical_key, **entry.as_dict()}
def set(self, logical_key, entry=None, meta=None, serialization_location=None, serialization_format_opts=None):
"""
Returns self with the object at logical_key set to entry.
Args:
logical_key(string): logical key to update
entry(PackageEntry OR string OR object): new entry to place at logical_key in the package.
If entry is a string, it is treated as a URL, and an entry is created based on it.
If entry is None, the logical key string will be substituted as the entry value.
If entry is an object and quilt knows how to serialize it, it will immediately be serialized and
written to disk, either to serialization_location or to a location managed by quilt. List of types that
Quilt can serialize is available by calling `quilt3.formats.FormatRegistry.all_supported_formats()`
meta(dict): user level metadata dict to attach to entry
serialization_format_opts(dict): Optional. If passed in, only used if entry is an object. Options to help
Quilt understand how the object should be serialized. Useful for underspecified file formats like csv
when content contains confusing characters. Will be passed as kwargs to the FormatHandler.serialize()
function. See docstrings for individual FormatHandlers for full list of options -
https://github.com/quiltdata/quilt/blob/master/api/python/quilt3/formats.py
serialization_location(string): Optional. If passed in, only used if entry is an object. Where the
serialized object should be written, e.g. "./mydataframe.parquet"
Returns:
self
"""
return self._set(logical_key=logical_key,
entry=entry,
meta=meta,
serialization_location=serialization_location,
serialization_format_opts=serialization_format_opts)
def _set(self, logical_key, entry=None, meta=None, serialization_location=None, serialization_format_opts=None):
if not logical_key or logical_key.endswith('/'):
raise QuiltException(
f"Invalid logical key {logical_key!r}. "
f"A package entry logical key cannot be a directory."
)
validate_key(logical_key)
if entry is None:
entry = pathlib.Path(logical_key).resolve().as_uri()
if isinstance(entry, (str, os.PathLike)):
src = PhysicalKey.from_url(fix_url(str(entry)))
size, version_id = get_size_and_version(src)
# Determine if a new version needs to be appended.
if not src.is_local() and src.version_id is None and version_id is not None:
src.version_id = version_id
entry = PackageEntry(src, size, None, None)
elif isinstance(entry, PackageEntry):
assert meta is None
elif FormatRegistry.object_is_serializable(entry):
# Use file extension from serialization_location, fall back to file extension from logical_key
# If neither has a file extension, Quilt picks the serialization format.
logical_key_ext = extract_file_extension(logical_key)
serialize_loc_ext = None
if serialization_location is not None:
serialize_loc_ext = extract_file_extension(serialization_location)
if logical_key_ext is not None and serialize_loc_ext is not None:
assert logical_key_ext == serialize_loc_ext, f"The logical_key and the serialization_location have " \
f"different file extensions: {logical_key_ext} vs " \
f"{serialize_loc_ext}. Quilt doesn't know which to use!"
if serialize_loc_ext is not None:
ext = serialize_loc_ext
elif logical_key_ext is not None:
ext = logical_key_ext
else:
ext = None
format_handlers = FormatRegistry.search(type(entry))
if ext:
format_handlers = [f for f in format_handlers if ext in f.handled_extensions]
if len(format_handlers) == 0:
error_message = f'Quilt does not know how to serialize a {type(entry)}'
if ext is not None:
error_message += f' as a {ext} file.'
error_message += '. If you think this should be supported, please open an issue or PR at ' \
'https://github.com/quiltdata/quilt'
raise QuiltException(error_message)
if serialization_format_opts is None:
serialization_format_opts = {}
serialized_object_bytes, new_meta = format_handlers[0].serialize(entry, meta=None, ext=ext,
**serialization_format_opts)
if serialization_location is None:
serialization_path = APP_DIR_TEMPFILE_DIR / str(uuid.uuid4())
if ext:
serialization_path = serialization_path.with_suffix(f'.{ext}')
else:
serialization_path = pathlib.Path(serialization_location).expanduser().resolve()
serialization_path.parent.mkdir(exist_ok=True, parents=True)
serialization_path.write_bytes(serialized_object_bytes)
size = serialization_path.stat().st_size
write_pk = PhysicalKey.from_path(serialization_path)
entry = PackageEntry(write_pk, size, hash_obj=None, meta=new_meta)
else:
raise TypeError(f"Expected a string for entry, but got an instance of {type(entry)}.")
if meta is not None:
entry.set_meta(meta)
path = self._split_key(logical_key)
pkg = self._ensure_subpackage(path[:-1], ensure_no_entry=True)
if path[-1] in pkg and isinstance(pkg[path[-1]], Package):
raise QuiltException("Cannot overwrite directory with PackageEntry")
pkg._children[path[-1]] = entry
return self
def _ensure_subpackage(self, path, ensure_no_entry=False):
"""
Creates a package and any intermediate packages at the given path.
Args:
path(list): logical key as a list or tuple
ensure_no_entry(boolean): if True, throws if this would overwrite
a PackageEntry that already exists in the tree.
Returns:
newly created or existing package at that path
"""
pkg = self
for key_fragment in path:
if ensure_no_entry and key_fragment in pkg \
and isinstance(pkg[key_fragment], PackageEntry):
raise QuiltException("Already a PackageEntry along the path.")
pkg = pkg._children.setdefault(key_fragment, Package())
return pkg
def delete(self, logical_key):
"""
Returns the package with logical_key removed.
Returns:
self
Raises:
KeyError: when logical_key is not present to be deleted
"""
path = self._split_key(logical_key)
pkg = self[path[:-1]]
del pkg._children[path[-1]]
return self
@property
def top_hash(self):
"""
Returns the top hash of the package.
Note that physical keys are not hashed because the package has
the same semantics regardless of where the bytes come from.
Returns:
A string that represents the top hash of the package
"""
top_hash = hashlib.sha256()
assert 'top_hash' not in self._meta
top_meta = json.dumps(self._meta, sort_keys=True, separators=(',', ':'))
top_hash.update(top_meta.encode('utf-8'))
for logical_key, entry in self.walk():
if entry.hash is None or entry.size is None:
raise QuiltException(
"PackageEntry missing hash and/or size: %s" % entry.physical_key
)
entry_dict = entry.as_dict()
entry_dict['logical_key'] = logical_key
entry_dict.pop('physical_keys', None)
entry_dict_str = json.dumps(entry_dict, sort_keys=True, separators=(',', ':'))
top_hash.update(entry_dict_str.encode('utf-8'))
return top_hash.hexdigest()
@ApiTelemetry("package.push")
def push(self, name, registry=None, dest=None, message=None, selector_fn=None):
"""
Copies objects to path, then creates a new package that points to those objects.
Copies each object in this package to path according to logical key structure,
then adds to the registry a serialized version of this package with
physical keys that point to the new copies.
Note that push is careful to not push data unnecessarily. To illustrate, imagine you have
a PackageEntry: `pkg["entry_1"].physical_key = "/tmp/package_entry_1.json"`
If that entry would be pushed to `s3://bucket/prefix/entry_1.json`, but
`s3://bucket/prefix/entry_1.json` already contains the exact same bytes as
'/tmp/package_entry_1.json', `quilt3` will not push the bytes to s3, no matter what
`selector_fn('entry_1', pkg["entry_1"])` returns.
However, selector_fn will dictate whether the new package points to the local file or to s3:
If `selector_fn('entry_1', pkg["entry_1"]) == False`,
`new_pkg["entry_1"] = ["/tmp/package_entry_1.json"]`
If `selector_fn('entry_1', pkg["entry_1"]) == True`,
`new_pkg["entry_1"] = ["s3://bucket/prefix/entry_1.json"]`
Args:
name: name for package in registry
dest: where to copy the objects in the package
registry: registry where to create the new package
message: the commit message for the new package
selector_fn: An optional function that determines which package entries should be copied to S3.
The function takes in two arguments, logical_key and package_entry, and should return False if that
PackageEntry should be skipped during push. If for example you have a package where the files
are spread over multiple buckets and you add a single local file, you can use selector_fn to
only push the local file to s3 (instead of pushing all data to the destination bucket).
Returns:
A new package that points to the copied objects.
"""
if selector_fn is None:
def selector_fn(*args):
return True
validate_package_name(name)
if registry is None:
registry = get_from_config('default_remote_registry')
if registry is None:
raise QuiltException(
"No registry specified and no default remote registry configured. Please "
"specify a registry or configure a default remote registry with quilt3.config"
)
registry_parsed = PhysicalKey.from_url(fix_url(registry))
else:
registry_parsed = PhysicalKey.from_url(fix_url(registry))
if not registry_parsed.is_local():
if registry_parsed.path != '':
raise QuiltException(
f"The 'registry' argument expects an S3 bucket but the S3 object path "
f"{registry!r} was provided instead. You probably wanted to set "
f"'registry' to {'s3://' + registry_parsed.bucket!r} instead. To specify that package "
f"data land in a specific directory use 'dest'."
)
else:
raise QuiltException(
f"Can only 'push' to remote registries in S3, but {registry!r} "
f"is a local file. To store a package in the local registry, use "
f"'build' instead."
)
if dest is None:
dest_parsed = registry_parsed.join(name)
else:
dest_parsed = PhysicalKey.from_url(fix_url(dest))
if dest_parsed.bucket != registry_parsed.bucket:
raise QuiltException(
f"Invalid package destination path {dest!r}. 'dest', if set, must be a path "
f"in the {registry!r} package registry specified by 'registry'."
)
self._fix_sha256()
pkg = self.__class__()
pkg._meta = self._meta
# Since all that is modified is physical keys, pkg will have the same top hash
file_list = []
entries = []
for logical_key, entry in self.walk():
if not selector_fn(logical_key, entry):
pkg._set(logical_key, entry)
continue
# Copy the datafiles in the package.
physical_key = entry.physical_key
new_physical_key = dest_parsed.join(logical_key)
if (
physical_key.bucket == new_physical_key.bucket and
physical_key.path == new_physical_key.path
):
# No need to copy - re-use the original physical key.
pkg._set(logical_key, entry)
else:
entries.append((logical_key, entry))
file_list.append((physical_key, new_physical_key, entry.size))
results = copy_file_list(file_list, message="Copying objects")
for (logical_key, entry), versioned_key in zip(entries, results):
# Create a new package entry pointing to the new remote key.
assert versioned_key is not None
new_entry = entry.with_physical_key(versioned_key)
pkg._set(logical_key, new_entry)
def physical_key_is_temp_file(pk):
if not pk.is_local():
return False
return pathlib.Path(pk.path).parent == APP_DIR_TEMPFILE_DIR
temp_file_logical_keys = [lk for lk, entry in self.walk() if physical_key_is_temp_file(entry.physical_key)]
temp_file_physical_keys = [self[lk].physical_key for lk in temp_file_logical_keys]
# Now that data has been pushed, delete tmp files created by pkg.set('KEY', obj)
with Pool(10) as p:
p.map(_delete_local_physical_key, temp_file_physical_keys)
        # Update old package to point to the materialized location of the file since the tempfile no longer exists
for lk in temp_file_logical_keys:
self._set(lk, pkg[lk])
top_hash = pkg._build(name, registry=registry, message=message)
shorthash = get_package_registry(registry).shorten_top_hash(name, top_hash)
print(f"Package {name}@{shorthash} pushed to s3://{dest_parsed.bucket}")
if user_is_configured_to_custom_stack():
navigator_url = get_from_config("navigator_url")
print(f"Successfully pushed the new package to "
f"{catalog_package_url(navigator_url, dest_parsed.bucket, name)}")
else:
dest_s3_url = str(dest_parsed)
if not dest_s3_url.endswith("/"):
dest_s3_url += "/"
print(f"Run `quilt3 catalog {dest_s3_url}` to browse.")
print("Successfully pushed the new package")
return pkg
@classmethod
def rollback(cls, name, registry, top_hash):
"""
Set the "latest" version to the given hash.
Args:
name(str): Name of package to rollback.
registry(str): Registry where package is located.
top_hash(str): Hash to rollback to.
"""
validate_package_name(name)
registry = get_package_registry(PhysicalKey.from_url(fix_url(registry)))
top_hash = registry.resolve_top_hash(name, top_hash)
# Check that both latest and top_hash actually exist.
get_size_and_version(registry.manifest_pk(name, top_hash))
latest_path = registry.pointer_latest_pk(name)
get_size_and_version(registry.pointer_latest_pk(name))
put_bytes(top_hash.encode('utf-8'), latest_path)
@ApiTelemetry("package.diff")
def diff(self, other_pkg):
"""
Returns three lists -- added, modified, deleted.
Added: present in other_pkg but not in self.
Modified: present in both, but different.
Deleted: present in self, but not other_pkg.
Args:
other_pkg: Package to diff
Returns:
added, modified, deleted (all lists of logical keys)
"""
deleted = []
modified = []
other_entries = dict(other_pkg.walk())
for lk, entry in self.walk():
other_entry = other_entries.pop(lk, None)
if other_entry is None:
deleted.append(lk)
elif entry != other_entry:
modified.append(lk)
added = list(sorted(other_entries))
return added, modified, deleted
@ApiTelemetry("package.map")
def map(self, f, include_directories=False):
"""
Performs a user-specified operation on each entry in the package.
Args:
f(x, y): function
The function to be applied to each package entry.
It should take two inputs, a logical key and a PackageEntry.
include_directories: bool
Whether or not to include directory entries in the map.
Returns: list
The list of results generated by the map.
"""
return self._map(f, include_directories=include_directories)
def _map(self, f, include_directories=False):
if include_directories:
for lk, _ in self._walk_dir_meta():
yield f(lk, self[lk.rstrip("/")])
for lk, entity in self.walk():
yield f(lk, entity)
@ApiTelemetry("package.filter")
def filter(self, f, include_directories=False):
"""
Applies a user-specified operation to each entry in the package,
removing results that evaluate to False from the output.
Args:
f(x, y): function
The function to be applied to each package entry.
It should take two inputs, a logical key and a PackageEntry.
This function should return a boolean.
include_directories: bool
Whether or not to include directory entries in the map.
Returns:
A new package with entries that evaluated to False removed
"""
return self._filter(f=f, include_directories=include_directories)
def _filter(self, f, include_directories=False):
p = Package()
excluded_dirs = set()
if include_directories:
for lk, _ in self._walk_dir_meta():
if not f(lk, self[lk.rstrip("/")]):
excluded_dirs.add(lk)
for lk, entity in self.walk():
if (not any(p in excluded_dirs
for p in pathlib.PurePosixPath(lk).parents)
and f(lk, entity)):
p._set(lk, entity)
return p
def verify(self, src, extra_files_ok=False):
"""
Check if the contents of the given directory matches the package manifest.
Args:
src(str): URL of the directory
extra_files_ok(bool): Whether extra files in the directory should cause a failure.
Returns:
True if the package matches the directory; False otherwise.
"""
src = PhysicalKey.from_url(fix_url(src))
src_dict = dict(list_url(src))
url_list = []
size_list = []
for logical_key, entry in self.walk():
src_size = src_dict.pop(logical_key, None)
if src_size is None:
return False
if entry.size != src_size:
return False
entry_url = src.join(logical_key)
url_list.append(entry_url)
size_list.append(src_size)
if src_dict and not extra_files_ok:
return False
hash_list = calculate_sha256(url_list, size_list)
for (logical_key, entry), url_hash in zip(self.walk(), hash_list):
if isinstance(url_hash, Exception):
raise url_hash
if entry.hash['value'] != url_hash:
return False
return True
| 1 | 18,743 | We should raise `ValueError` if `update_policy not in ['existing', 'incoming']`. (or `in Enum.__members__` or however we want to express legal values) | quiltdata-quilt | py |
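A minimal sketch of the validation suggested above, assuming the legal values remain the literal strings 'existing' and 'incoming'; the module-level constant and the helper name _validate_update_policy are illustrative and not part of the actual patch:

VALID_UPDATE_POLICIES = ('existing', 'incoming')

def _validate_update_policy(update_policy):
    # Fail fast on unsupported values (e.g. a typo such as "incomming")
    # instead of silently falling through to one branch later on.
    if update_policy not in VALID_UPDATE_POLICIES:
        raise ValueError(
            f"Invalid update_policy {update_policy!r}; "
            f"expected one of {VALID_UPDATE_POLICIES}."
        )

# Inside set_dir(...), before any entries are added:
#     _validate_update_policy(update_policy)

An Enum would express the legal values more strictly, as the reviewer's parenthetical suggests, at the cost of a slightly heavier call signature for callers passing plain strings.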
@@ -42,10 +42,11 @@ namespace MvvmCross.iOS.Platform
_applicationDelegate = applicationDelegate;
}
- protected MvxIosSetup(IMvxApplicationDelegate applicationDelegate, IMvxIosViewPresenter presenter)
+ protected MvxIosSetup(IMvxApplicationDelegate applicationDelegate, UIWindow window, IMvxIosViewPresenter presenter) : this (applicationDelegate, window)
{
- _presenter = presenter;
+ _window = window;
_applicationDelegate = applicationDelegate;
+ _presenter = presenter;
}
protected UIWindow Window => _window; | 1 | // MvxIosSetup.cs
// MvvmCross is licensed using Microsoft Public License (Ms-PL)
// Contributions and inspirations noted in readme.md and license.txt
//
// Project Lead - Stuart Lodge, @slodge, [email protected]
using System;
using System.Collections.Generic;
using System.Reflection;
using MvvmCross.Binding;
using MvvmCross.Binding.Binders;
using MvvmCross.Binding.BindingContext;
using MvvmCross.Binding.Bindings.Target.Construction;
using MvvmCross.Binding.iOS;
using MvvmCross.Core.Platform;
using MvvmCross.Core.ViewModels;
using MvvmCross.Core.Views;
using MvvmCross.iOS.Views;
using MvvmCross.iOS.Views.Presenters;
using MvvmCross.Platform;
using MvvmCross.Platform.Converters;
using MvvmCross.Platform.iOS.Platform;
using MvvmCross.Platform.iOS.Views;
using MvvmCross.Platform.Platform;
using MvvmCross.Platform.Plugins;
using UIKit;
namespace MvvmCross.iOS.Platform
{
public abstract class MvxIosSetup
: MvxSetup
{
private readonly IMvxApplicationDelegate _applicationDelegate;
private readonly UIWindow _window;
private IMvxIosViewPresenter _presenter;
protected MvxIosSetup(IMvxApplicationDelegate applicationDelegate, UIWindow window)
{
_window = window;
_applicationDelegate = applicationDelegate;
}
protected MvxIosSetup(IMvxApplicationDelegate applicationDelegate, IMvxIosViewPresenter presenter)
{
_presenter = presenter;
_applicationDelegate = applicationDelegate;
}
protected UIWindow Window => _window;
protected IMvxApplicationDelegate ApplicationDelegate => _applicationDelegate;
protected override IMvxTrace CreateDebugTrace()
{
return new MvxDebugTrace();
}
protected override IMvxPluginManager CreatePluginManager()
{
return new MvxPluginManager();
}
protected sealed override IMvxViewsContainer CreateViewsContainer()
{
var container = CreateIosViewsContainer();
RegisterIosViewCreator(container);
return container;
}
protected virtual IMvxIosViewsContainer CreateIosViewsContainer()
{
return new MvxIosViewsContainer();
}
protected virtual void RegisterIosViewCreator(IMvxIosViewsContainer container)
{
Mvx.RegisterSingleton<IMvxIosViewCreator>(container);
Mvx.RegisterSingleton<IMvxCurrentRequest>(container);
}
protected override IMvxViewDispatcher CreateViewDispatcher()
{
return new MvxIosViewDispatcher(Presenter);
}
protected override void InitializePlatformServices()
{
RegisterPlatformProperties();
RegisterPresenter();
RegisterLifetime();
}
protected virtual void RegisterPlatformProperties()
{
Mvx.RegisterSingleton<IMvxIosSystem>(CreateIosSystemProperties());
}
protected virtual MvxIosSystem CreateIosSystemProperties()
{
return new MvxIosSystem();
}
protected virtual void RegisterLifetime()
{
Mvx.RegisterSingleton<IMvxLifetime>(_applicationDelegate);
}
protected IMvxIosViewPresenter Presenter
{
get
{
_presenter = _presenter ?? CreatePresenter();
return _presenter;
}
}
protected virtual IMvxIosViewPresenter CreatePresenter()
{
return new MvxIosViewPresenter(_applicationDelegate, _window);
}
protected virtual void RegisterPresenter()
{
var presenter = Presenter;
Mvx.RegisterSingleton(presenter);
Mvx.RegisterSingleton<IMvxIosModalHost>(presenter);
}
protected override void InitializeLastChance()
{
InitializeBindingBuilder();
base.InitializeLastChance();
}
protected virtual void InitializeBindingBuilder()
{
RegisterBindingBuilderCallbacks();
var bindingBuilder = CreateBindingBuilder();
bindingBuilder.DoRegistration();
}
protected virtual void RegisterBindingBuilderCallbacks()
{
Mvx.CallbackWhenRegistered<IMvxValueConverterRegistry>(FillValueConverters);
Mvx.CallbackWhenRegistered<IMvxTargetBindingFactoryRegistry>(FillTargetFactories);
Mvx.CallbackWhenRegistered<IMvxBindingNameRegistry>(FillBindingNames);
}
protected virtual MvxBindingBuilder CreateBindingBuilder()
{
return new MvxIosBindingBuilder();
}
protected virtual void FillBindingNames(IMvxBindingNameRegistry obj)
{
// this base class does nothing
}
protected virtual void FillValueConverters(IMvxValueConverterRegistry registry)
{
registry.Fill(ValueConverterAssemblies);
registry.Fill(ValueConverterHolders);
}
protected virtual List<Type> ValueConverterHolders => new List<Type>();
protected virtual IEnumerable<Assembly> ValueConverterAssemblies
{
get
{
var toReturn = new List<Assembly>();
toReturn.AddRange(GetViewModelAssemblies());
toReturn.AddRange(GetViewAssemblies());
return toReturn;
}
}
protected virtual void FillTargetFactories(IMvxTargetBindingFactoryRegistry registry)
{
// this base class does nothing
}
protected override IMvxNameMapping CreateViewToViewModelNaming()
{
return new MvxPostfixAwareViewToViewModelNameMapping("View", "ViewController");
}
}
} | 1 | 12,963 | `_applicationDelegate` and `_window` are already set in the call for `: this (applicationDelegate, window)`, no need to assigning them again here | MvvmCross-MvvmCross | .cs |
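// Editorial sketch (not part of the original file): the review note on this row observes
// that the chained ": this(applicationDelegate, window)" call already assigns
// _applicationDelegate and _window, so the overload only needs to set the presenter.
// A minimal form of that constructor, under that assumption, would be:
//
//     protected MvxIosSetup(IMvxApplicationDelegate applicationDelegate, UIWindow window, IMvxIosViewPresenter presenter)
//         : this(applicationDelegate, window)
//     {
//         _presenter = presenter;
//     }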
@@ -9,7 +9,7 @@ export function createContext(defaultValue) {
_id: '__cC' + i++,
_defaultValue: defaultValue,
Consumer(props, context) {
- return props.children(context);
+ return props.children(props.selector(context));
},
Provider(props) {
if (!this.getChildContext) { | 1 | import { enqueueRender } from './component';
export let i = 0;
export function createContext(defaultValue) {
const ctx = {};
const context = {
_id: '__cC' + i++,
_defaultValue: defaultValue,
Consumer(props, context) {
return props.children(context);
},
Provider(props) {
if (!this.getChildContext) {
const subs = [];
this.getChildContext = () => {
ctx[context._id] = this;
return ctx;
};
this.shouldComponentUpdate = _props => {
if (this.props.value !== _props.value) {
subs.some(c => {
c.context = _props.value;
enqueueRender(c);
});
}
};
this.sub = c => {
subs.push(c);
let old = c.componentWillUnmount;
c.componentWillUnmount = () => {
subs.splice(subs.indexOf(c), 1);
old && old.call(c);
};
};
}
return props.children;
}
};
context.Consumer.contextType = context;
return context;
}
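// Editorial sketch (not part of the original file): the review note on this row warns that
// a Consumer may be rendered without a `selector` prop, so the patched call
// props.children(props.selector(context)) could throw. One defensive form (an
// illustration, not the project's actual fix) is to fall back to the raw context:
//
//     Consumer(props, context) {
//         return props.children(props.selector ? props.selector(context) : context);
//     }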
| 1 | 15,521 | We might not have a selector prop in the consumer | preactjs-preact | js |
@@ -386,7 +386,7 @@ namespace Nethermind.Blockchain.Processing
break;
}
- bool isFastSyncTransition = _blockTree.Head?.Header == _blockTree.Genesis && toBeProcessed.Number > 1;
+ bool isFastSyncTransition = (_blockTree.Head?.IsGenesis ?? false) && toBeProcessed.Number > 1;
if (!isFastSyncTransition)
{
if (_logger.IsTrace) _logger.Trace($"Finding parent of {toBeProcessed.ToString(Block.Format.Short)}"); | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Nethermind.Blockchain.Find;
using Nethermind.Core;
using Nethermind.Core.Attributes;
using Nethermind.Core.Crypto;
using Nethermind.Dirichlet.Numerics;
using Nethermind.Evm.Tracing;
using Nethermind.Logging;
namespace Nethermind.Blockchain.Processing
{
public class BlockchainProcessor : IBlockchainProcessor, IBlockProcessingQueue
{
private readonly IBlockProcessor _blockProcessor;
private readonly IBlockDataRecoveryStep _recoveryStep;
private readonly bool _storeReceiptsByDefault;
private readonly IBlockTree _blockTree;
private readonly ILogger _logger;
private readonly BlockingCollection<BlockRef> _recoveryQueue = new BlockingCollection<BlockRef>(new ConcurrentQueue<BlockRef>());
private readonly BlockingCollection<BlockRef> _blockQueue = new BlockingCollection<BlockRef>(new ConcurrentQueue<BlockRef>(), MaxProcessingQueueSize);
private readonly ProcessingStats _stats;
private CancellationTokenSource _loopCancellationSource;
private Task _recoveryTask;
private Task _processorTask;
private int _currentRecoveryQueueSize;
public int SoftMaxRecoveryQueueSizeInTx = 10000; // adjust based on tx or gas
private const int MaxProcessingQueueSize = 2000; // adjust based on tx or gas
        /// <summary>
        /// Creates a blockchain processor that recovers transaction sender addresses on a
        /// background queue and then executes the recovered blocks in order.
        /// </summary>
        /// <param name="blockTree">Block tree used to resolve parents, detect reorgs and update the main chain.</param>
        /// <param name="blockProcessor">Processor that executes each prepared branch of blocks.</param>
        /// <param name="recoveryStep">Step that recovers sender addresses for block transactions.</param>
        /// <param name="logManager">Logger factory.</param>
        /// <param name="storeReceiptsByDefault">Whether receipts are stored for blocks enqueued from block tree events.</param>
        /// <param name="autoProcess">When true, subscribes to the block tree's NewBestSuggestedBlock event and enqueues those blocks automatically.</param>
public BlockchainProcessor(
IBlockTree blockTree,
IBlockProcessor blockProcessor,
IBlockDataRecoveryStep recoveryStep,
ILogManager logManager,
bool storeReceiptsByDefault,
bool autoProcess = true)
{
_logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));
_blockTree = blockTree ?? throw new ArgumentNullException(nameof(blockTree));
_blockProcessor = blockProcessor ?? throw new ArgumentNullException(nameof(blockProcessor));
_recoveryStep = recoveryStep ?? throw new ArgumentNullException(nameof(recoveryStep));
_storeReceiptsByDefault = storeReceiptsByDefault;
if (autoProcess)
{
_blockTree.NewBestSuggestedBlock += OnNewBestBlock;
}
_stats = new ProcessingStats(_logger);
}
private void OnNewBestBlock(object sender, BlockEventArgs blockEventArgs)
{
ProcessingOptions options = ProcessingOptions.None;
if (_storeReceiptsByDefault)
{
options |= ProcessingOptions.StoreReceipts;
}
Enqueue(blockEventArgs.Block, options);
}
public void Enqueue(Block block, ProcessingOptions processingOptions)
{
if (_logger.IsTrace) _logger.Trace($"Enqueuing a new block {block.ToString(Block.Format.Short)} for processing.");
int currentRecoveryQueueSize = Interlocked.Add(ref _currentRecoveryQueueSize, block.Transactions.Length);
BlockRef blockRef = currentRecoveryQueueSize >= SoftMaxRecoveryQueueSizeInTx ? new BlockRef(block.Hash, processingOptions) : new BlockRef(block, processingOptions);
if (!_recoveryQueue.IsAddingCompleted)
{
try
{
_recoveryQueue.Add(blockRef);
if (_logger.IsTrace) _logger.Trace($"A new block {block.ToString(Block.Format.Short)} enqueued for processing.");
}
catch (InvalidOperationException)
{
if (!_recoveryQueue.IsAddingCompleted)
{
throw;
}
}
}
}
public void Start()
{
_loopCancellationSource = new CancellationTokenSource();
_recoveryTask = Task.Factory.StartNew(
RunRecoveryLoop,
_loopCancellationSource.Token,
TaskCreationOptions.LongRunning,
TaskScheduler.Default).ContinueWith(t =>
{
if (t.IsFaulted)
{
if (_logger.IsError) _logger.Error("Sender address recovery encountered an exception.", t.Exception);
}
else if (t.IsCanceled)
{
if (_logger.IsDebug) _logger.Debug("Sender address recovery stopped.");
}
else if (t.IsCompleted)
{
if (_logger.IsDebug) _logger.Debug("Sender address recovery complete.");
}
});
_processorTask = Task.Factory.StartNew(
RunProcessingLoop,
_loopCancellationSource.Token,
TaskCreationOptions.LongRunning,
TaskScheduler.Default).ContinueWith(t =>
{
if (t.IsFaulted)
{
if (_logger.IsError) _logger.Error($"{nameof(BlockchainProcessor)} encountered an exception.", t.Exception);
}
else if (t.IsCanceled)
{
if (_logger.IsDebug) _logger.Debug($"{nameof(BlockchainProcessor)} stopped.");
}
else if (t.IsCompleted)
{
if (_logger.IsDebug) _logger.Debug($"{nameof(BlockchainProcessor)} complete.");
}
});
}
public async Task StopAsync(bool processRemainingBlocks = false)
{
if (processRemainingBlocks)
{
_recoveryQueue.CompleteAdding();
await _recoveryTask;
_blockQueue.CompleteAdding();
}
else
{
_loopCancellationSource.Cancel();
_recoveryQueue.CompleteAdding();
_blockQueue.CompleteAdding();
}
await Task.WhenAll(_recoveryTask, _processorTask);
            if (_logger.IsInfo) _logger.Info("Blockchain Processor shutdown complete... please wait for all components to close");
}
private void RunRecoveryLoop()
{
if (_logger.IsDebug) _logger.Debug($"Starting recovery loop - {_blockQueue.Count} blocks waiting in the queue.");
foreach (BlockRef blockRef in _recoveryQueue.GetConsumingEnumerable(_loopCancellationSource.Token))
{
if (!blockRef.Resolve(_blockTree))
{
if (_logger.IsTrace) _logger.Trace("Block was removed from the DB and cannot be recovered (it belonged to an invalid branch). Skipping.");
continue;
}
Interlocked.Add(ref _currentRecoveryQueueSize, -blockRef.Block.Transactions.Length);
if (_logger.IsTrace) _logger.Trace($"Recovering addresses for block {blockRef.BlockHash?.ToString() ?? blockRef.Block.ToString(Block.Format.Short)}.");
_recoveryStep.RecoverData(blockRef.Block);
try
{
_blockQueue.Add(blockRef);
}
catch (InvalidOperationException)
{
if (_logger.IsDebug) _logger.Debug($"Recovery loop stopping.");
return;
}
}
}
private void RunProcessingLoop()
{
_stats.Start();
if (_logger.IsDebug) _logger.Debug($"Starting block processor - {_blockQueue.Count} blocks waiting in the queue.");
if (IsEmpty)
{
ProcessingQueueEmpty?.Invoke(this, EventArgs.Empty);
}
foreach (BlockRef blockRef in _blockQueue.GetConsumingEnumerable(_loopCancellationSource.Token))
{
if (blockRef.IsInDb || blockRef.Block == null)
{
throw new InvalidOperationException("Processing loop expects only resolved blocks");
}
Block block = blockRef.Block;
if (_logger.IsTrace) _logger.Trace($"Processing block {block.ToString(Block.Format.Short)}).");
IBlockTracer tracer = NullBlockTracer.Instance;
Block processedBlock = Process(block, blockRef.ProcessingOptions, tracer);
if (processedBlock == null)
{
if (_logger.IsTrace) _logger.Trace($"Failed / skipped processing {block.ToString(Block.Format.Full)}");
}
else
{
if (_logger.IsTrace) _logger.Trace($"Processed block {block.ToString(Block.Format.Full)}");
_stats.UpdateStats(block, _recoveryQueue.Count, _blockQueue.Count);
}
if (_logger.IsTrace) _logger.Trace($"Now {_blockQueue.Count} blocks waiting in the queue.");
if (IsEmpty)
{
ProcessingQueueEmpty?.Invoke(this, EventArgs.Empty);
}
}
if (_logger.IsInfo) _logger.Info("Block processor queue stopped.");
}
public event EventHandler ProcessingQueueEmpty;
public bool IsEmpty => _blockQueue.Count == 0 && _recoveryQueue.Count == 0;
[Todo("Introduce priority queue and create a SuggestWithPriority that waits for block execution to return a block, then make this private")]
public Block Process(Block suggestedBlock, ProcessingOptions options, IBlockTracer tracer)
{
if (!RunSimpleChecksAheadOfProcessing(suggestedBlock, options))
{
return null;
}
UInt256 totalDifficulty = suggestedBlock.TotalDifficulty ?? 0;
if (_logger.IsTrace) _logger.Trace($"Total difficulty of block {suggestedBlock.ToString(Block.Format.Short)} is {totalDifficulty}");
Block[] processedBlocks = null;
bool shouldProcess = suggestedBlock.IsGenesis
|| totalDifficulty > (_blockTree.Head?.TotalDifficulty ?? 0)
// so above is better and more correct but creates an impression of the node staying behind on stats page
// so we are okay to process slightly more
// and below is less correct but potentially reporting well
// || totalDifficulty >= (_blockTree.Head?.TotalDifficulty ?? 0)
|| (options & ProcessingOptions.ForceProcessing) == ProcessingOptions.ForceProcessing;
if (!shouldProcess)
{
if (_logger.IsDebug) _logger.Debug($"Skipped processing of {suggestedBlock.ToString(Block.Format.FullHashAndNumber)}, Head = {_blockTree.Head?.Header?.ToString(BlockHeader.Format.Short)}, total diff = {totalDifficulty}, head total diff = {_blockTree.Head?.TotalDifficulty}");
return null;
}
ProcessingBranch processingBranch = PrepareProcessingBranch(suggestedBlock, options);
PrepareBlocksToProcess(suggestedBlock, options, processingBranch);
try
{
processedBlocks = _blockProcessor.Process(processingBranch.Root, processingBranch.BlocksToProcess, options, tracer);
}
catch (InvalidBlockException ex)
{
for (int i = 0; i < processingBranch.BlocksToProcess.Count; i++)
{
if (processingBranch.BlocksToProcess[i].Hash == ex.InvalidBlockHash)
{
_blockTree.DeleteInvalidBlock(processingBranch.BlocksToProcess[i]);
if (_logger.IsDebug) _logger.Debug($"Skipped processing of {suggestedBlock.ToString(Block.Format.FullHashAndNumber)} because of {processingBranch.BlocksToProcess[i].ToString(Block.Format.FullHashAndNumber)} is invalid");
return null;
}
}
}
if ((options & (ProcessingOptions.ReadOnlyChain | ProcessingOptions.DoNotUpdateHead)) == 0)
{
_blockTree.UpdateMainChain(processingBranch.Blocks.ToArray(), true);
}
Block lastProcessed = null;
if (processedBlocks != null && processedBlocks.Length > 0)
{
lastProcessed = processedBlocks[^1];
if (_logger.IsTrace) _logger.Trace($"Setting total on last processed to {lastProcessed.ToString(Block.Format.Short)}");
lastProcessed.Header.TotalDifficulty = suggestedBlock.TotalDifficulty;
}
else
{
if (_logger.IsDebug) _logger.Debug($"Skipped processing of {suggestedBlock.ToString(Block.Format.FullHashAndNumber)}, last processed is null: {lastProcessed == null}, processedBlocks.Length: {processedBlocks?.Length}");
}
if ((options & ProcessingOptions.ReadOnlyChain) == ProcessingOptions.None)
{
_stats.UpdateStats(lastProcessed, _recoveryQueue.Count, _blockQueue.Count);
}
return lastProcessed;
}
private void PrepareBlocksToProcess(Block suggestedBlock, ProcessingOptions options, ProcessingBranch processingBranch)
{
List<Block> blocksToProcess = processingBranch.BlocksToProcess;
if ((options & ProcessingOptions.ForceProcessing) != 0)
{
processingBranch.Blocks.Clear();
blocksToProcess.Add(suggestedBlock);
}
else
{
foreach (Block block in processingBranch.Blocks)
{
if (block.Hash != null && _blockTree.WasProcessed(block.Number, block.Hash))
{
if (_logger.IsInfo) _logger.Info($"Rerunning block after reorg: {block.ToString(Block.Format.FullHashAndNumber)}");
}
blocksToProcess.Add(block);
}
blocksToProcess.Reverse();
}
if (_logger.IsTrace) _logger.Trace($"Processing {blocksToProcess.Count} blocks from state root {processingBranch.Root}");
for (int i = 0;
i < blocksToProcess.Count;
i++)
{
/* this can happen if the block was loaded as an ancestor and did not go through the recovery queue */
_recoveryStep.RecoverData(blocksToProcess[i]);
}
}
private ProcessingBranch PrepareProcessingBranch(Block suggestedBlock, ProcessingOptions options)
{
BlockHeader branchingPoint = null;
List<Block> blocksToBeAddedToMain = new List<Block>();
Block toBeProcessed = suggestedBlock;
do
{
blocksToBeAddedToMain.Add(toBeProcessed);
if (_logger.IsTrace) _logger.Trace($"To be processed (of {suggestedBlock.ToString(Block.Format.Short)}) is {toBeProcessed?.ToString(Block.Format.Short)}");
if (toBeProcessed.IsGenesis)
{
break;
}
branchingPoint = _blockTree.FindParentHeader(toBeProcessed.Header, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
if (branchingPoint == null)
{
break; //failure here
}
// for beam sync we do not expect previous blocks to necessarily be there and we
// do not need them since we can requests state from outside
if ((options & ProcessingOptions.IgnoreParentNotOnMainChain) != 0)
{
break;
}
bool isFastSyncTransition = _blockTree.Head?.Header == _blockTree.Genesis && toBeProcessed.Number > 1;
if (!isFastSyncTransition)
{
if (_logger.IsTrace) _logger.Trace($"Finding parent of {toBeProcessed.ToString(Block.Format.Short)}");
toBeProcessed = _blockTree.FindParent(toBeProcessed.Header, BlockTreeLookupOptions.None);
if (_logger.IsTrace) _logger.Trace($"Found parent {toBeProcessed?.ToString(Block.Format.Short)}");
if (toBeProcessed == null)
{
if (_logger.IsTrace) _logger.Trace($"Treating this as fast sync transition for {suggestedBlock.ToString(Block.Format.Short)}");
break;
}
}
else
{
break;
}
} while (!_blockTree.IsMainChain(branchingPoint.Hash));
if (branchingPoint != null && branchingPoint.Hash != _blockTree.Head?.Hash)
{
if (_logger.IsTrace) _logger.Trace($"Head block was: {_blockTree.Head?.Header?.ToString(BlockHeader.Format.Short)}");
if (_logger.IsTrace) _logger.Trace($"Branching from: {branchingPoint.ToString(BlockHeader.Format.Short)}");
}
else
{
if (_logger.IsTrace) _logger.Trace(branchingPoint == null ? "Setting as genesis block" : $"Adding on top of {branchingPoint.ToString(BlockHeader.Format.Short)}");
}
Keccak stateRoot = branchingPoint?.StateRoot;
if (_logger.IsTrace) _logger.Trace($"State root lookup: {stateRoot}");
return new ProcessingBranch(stateRoot, blocksToBeAddedToMain);
}
[Todo(Improve.Refactor, "This probably can be made conditional (in DEBUG only)")]
private bool RunSimpleChecksAheadOfProcessing(Block suggestedBlock, ProcessingOptions options)
{
/* a bit hacky way to get the invalid branch out of the processing loop */
if (suggestedBlock.Number != 0 && !_blockTree.IsKnownBlock(suggestedBlock.Number - 1, suggestedBlock.ParentHash))
{
if (_logger.IsDebug) _logger.Debug($"Skipping processing block {suggestedBlock.ToString(Block.Format.FullHashAndNumber)} with unknown parent");
return false;
}
if (suggestedBlock.Header.TotalDifficulty == null)
{
if (_logger.IsDebug) _logger.Debug($"Skipping processing block {suggestedBlock.ToString(Block.Format.FullHashAndNumber)} without total difficulty");
throw new InvalidOperationException("Block without total difficulty calculated was suggested for processing");
}
if ((options & ProcessingOptions.NoValidation) == 0 && suggestedBlock.Hash == null)
{
if (_logger.IsDebug) _logger.Debug($"Skipping processing block {suggestedBlock.ToString(Block.Format.FullHashAndNumber)} without calculated hash");
throw new InvalidOperationException("Block hash should be known at this stage if running in a validating mode");
}
for (int i = 0;
i < suggestedBlock.Ommers.Length;
i++)
{
if (suggestedBlock.Ommers[i].Hash == null)
{
if (_logger.IsDebug) _logger.Debug($"Skipping processing block {suggestedBlock.ToString(Block.Format.FullHashAndNumber)} with null ommer hash ar {i}");
throw new InvalidOperationException($"Ommer's {i} hash is null when processing block");
}
}
return true;
}
public void Dispose()
{
_recoveryQueue?.Dispose();
_blockQueue?.Dispose();
_loopCancellationSource?.Dispose();
_recoveryTask?.Dispose();
_processorTask?.Dispose();
_blockTree.NewBestSuggestedBlock -= OnNewBestBlock;
}
private struct ProcessingBranch
{
public ProcessingBranch(Keccak root, List<Block> blocks)
{
Root = root;
Blocks = blocks;
BlocksToProcess = new List<Block>();
ProcessedBlocks = new List<Block>();
}
public Keccak Root { get; }
public List<Block> Blocks { get; }
public List<Block> BlocksToProcess { get; }
public List<Block> ProcessedBlocks { get; }
}
}
} | 1 | 23,752 | IMO == true is more readable than ?? false | NethermindEth-nethermind | .cs |
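// Editorial sketch (not part of the original file): the review note on this row prefers
// comparing the nullable bool with "== true" instead of "?? false". Applied to the
// patched line, that reading would be:
//
//     bool isFastSyncTransition = _blockTree.Head?.IsGenesis == true && toBeProcessed.Number > 1;
//
// Both forms evaluate to false when Head is null; the difference is purely stylistic.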
@@ -3,7 +3,7 @@ const test = require('./shared').assert,
setupDatabase = require('./shared').setupDatabase,
Script = require('vm'),
expect = require('chai').expect,
- normalizedFunctionString = require('bson/lib/bson/parser/utils').normalizedFunctionString,
+ normalizedFunctionString = require('bson/lib/parser/utils').normalizedFunctionString,
Buffer = require('safe-buffer').Buffer;
const { | 1 | 'use strict';
const test = require('./shared').assert,
setupDatabase = require('./shared').setupDatabase,
Script = require('vm'),
expect = require('chai').expect,
normalizedFunctionString = require('bson/lib/bson/parser/utils').normalizedFunctionString,
Buffer = require('safe-buffer').Buffer;
const {
Long,
Timestamp,
ObjectID,
DBRef,
Symbol,
Double,
Binary,
MinKey,
MaxKey,
Code
} = require('../..');
/**
* Module for parsing an ISO 8601 formatted string into a Date object.
* @ignore
*/
var ISODate = function(string) {
var match;
if (typeof string.getTime === 'function') return string;
else if (
(match = string.match(
/^(\d{4})(-(\d{2})(-(\d{2})(T(\d{2}):(\d{2})(:(\d{2})(\.(\d+))?)?(Z|((\+|-)(\d{2}):(\d{2}))))?)?)?$/
))
) {
var date = new Date();
date.setUTCFullYear(Number(match[1]));
date.setUTCMonth(Number(match[3]) - 1 || 0);
date.setUTCDate(Number(match[5]) || 0);
date.setUTCHours(Number(match[7]) || 0);
date.setUTCMinutes(Number(match[8]) || 0);
date.setUTCSeconds(Number(match[10]) || 0);
date.setUTCMilliseconds(Number('.' + match[12]) * 1000 || 0);
if (match[13] && match[13] !== 'Z') {
var h = Number(match[16]) || 0,
m = Number(match[17]) || 0;
h *= 3600000;
m *= 60000;
var offset = h + m;
if (match[15] === '+') offset = -offset;
new Date(date.valueOf() + offset);
}
return date;
} else throw new Error('Invalid ISO 8601 date given.', __filename);
};
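// Editorial usage example (not part of the original file) for the helper above:
//
//     var ts = ISODate('2011-10-02T14:00:08.383Z'); // Date at that UTC instant
//     ISODate(ts) === ts;                           // Date inputs are returned unchanged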
describe('Insert', function() {
before(function() {
return setupDatabase(this.configuration);
});
/**
* @ignore
*/
it('shouldCorrectlyPerformSingleInsert', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyPerformSingleInsert');
collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
collection.findOne(function(err, item) {
test.equal(1, item.a);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyHandleMultipleDocumentInsert', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_multiple_insert');
var docs = [{ a: 1 }, { a: 2 }];
collection.insert(docs, configuration.writeConcernMax(), function(err, r) {
test.equal(2, r.result.n);
test.equal(2, r.ops.length);
test.equal(2, r.insertedCount);
test.equal(2, Object.keys(r.insertedIds).length);
test.ok(r.insertedIds[0]._bsontype === 'ObjectID');
test.ok(r.insertedIds[1]._bsontype === 'ObjectID');
r.ops.forEach(function(doc) {
test.ok(
doc['_id']._bsontype === 'ObjectID' ||
Object.prototype.toString.call(doc['_id']) === '[object ObjectID]'
);
});
// Let's ensure we have both documents
collection.find().toArray(function(err, docs) {
test.equal(2, docs.length);
var results = [];
// Check that we have all the results we want
docs.forEach(function(doc) {
if (doc.a === 1 || doc.a === 2) results.push(1);
});
test.equal(2, results.length);
// Let's close the db
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyExecuteSaveInsertUpdate', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyExecuteSaveInsertUpdate');
collection.save({ email: 'save' }, configuration.writeConcernMax(), function() {
collection.insert({ email: 'insert' }, configuration.writeConcernMax(), function() {
collection.update(
{ email: 'update' },
{ email: 'update' },
{ upsert: true, w: 1 },
function() {
collection.find().toArray(function(e, a) {
test.equal(3, a.length);
client.close(done);
});
}
);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyInsertAndRetrieveLargeIntegratedArrayDocument', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_should_deserialize_large_integrated_array');
var doc = {
a: 0,
b: [
'tmp1',
'tmp2',
'tmp3',
'tmp4',
'tmp5',
'tmp6',
'tmp7',
'tmp8',
'tmp9',
'tmp10',
'tmp11',
'tmp12',
'tmp13',
'tmp14',
'tmp15',
'tmp16'
]
};
// Insert the collection
collection.insert(doc, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Fetch and check the collection
collection.findOne({ a: 0 }, function(err, result) {
test.deepEqual(doc.a, result.a);
test.deepEqual(doc.b, result.b);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyInsertAndRetrieveDocumentWithAllTypes', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_all_serialization_types');
var date = new Date();
var oid = new ObjectID();
var string = 'binstring';
var bin = new Binary();
for (var index = 0; index < string.length; index++) {
bin.put(string.charAt(index));
}
var motherOfAllDocuments = {
string: 'hello',
array: [1, 2, 3],
hash: { a: 1, b: 2 },
date: date,
oid: oid,
binary: bin,
int: 42,
float: 33.3333,
regexp: /regexp/,
boolean: true,
long: date.getTime(),
where: new Code('this.a > i', { i: 1 }),
dbref: new DBRef('namespace', oid, 'integration_tests_')
};
collection.insert(motherOfAllDocuments, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
collection.findOne(function(err, doc) {
// Assert correct deserialization of the values
test.equal(motherOfAllDocuments.string, doc.string);
test.deepEqual(motherOfAllDocuments.array, doc.array);
test.equal(motherOfAllDocuments.hash.a, doc.hash.a);
test.equal(motherOfAllDocuments.hash.b, doc.hash.b);
test.equal(date.getTime(), doc.long);
test.equal(date.toString(), doc.date.toString());
test.equal(date.getTime(), doc.date.getTime());
test.equal(motherOfAllDocuments.oid.toHexString(), doc.oid.toHexString());
test.equal(motherOfAllDocuments.binary.value(), doc.binary.value());
test.equal(motherOfAllDocuments.int, doc.int);
test.equal(motherOfAllDocuments.long, doc.long);
test.equal(motherOfAllDocuments.float, doc.float);
test.equal(motherOfAllDocuments.regexp.toString(), doc.regexp.toString());
test.equal(motherOfAllDocuments.boolean, doc.boolean);
test.equal(motherOfAllDocuments.where.code, doc.where.code);
test.equal(motherOfAllDocuments.where.scope['i'], doc.where.scope.i);
test.equal(motherOfAllDocuments.dbref.namespace, doc.dbref.namespace);
test.equal(motherOfAllDocuments.dbref.oid.toHexString(), doc.dbref.oid.toHexString());
test.equal(motherOfAllDocuments.dbref.db, doc.dbref.db);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyInsertAndUpdateDocumentWithNewScriptContext', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
        // convenience curried handler for functions of type 'a -> (err, result)
function getResult(callback) {
return function(error, result) {
test.ok(error == null);
return callback(result);
};
}
db.collection(
'users',
getResult(function(user_collection) {
user_collection.remove({}, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
//first, create a user object
var newUser = { name: 'Test Account', settings: {} };
user_collection.insert(
[newUser],
configuration.writeConcernMax(),
getResult(function(r) {
var user = r.ops[0];
var scriptCode = "settings.block = []; settings.block.push('test');";
var context = { settings: { thisOneWorks: 'somestring' } };
Script.runInNewContext(scriptCode, context, 'testScript');
//now create update command and issue it
var updateCommand = { $set: context };
user_collection.update(
{ _id: user._id },
updateCommand,
configuration.writeConcernMax(),
getResult(function() {
// Fetch the object and check that the changes are persisted
user_collection.findOne({ _id: user._id }, function(err, doc) {
test.ok(err == null);
test.equal('Test Account', doc.name);
test.equal('somestring', doc.settings.thisOneWorks);
test.equal('test', doc.settings.block[0]);
client.close(done);
});
})
);
})
);
});
})
);
});
}
});
/**
* @ignore
*/
it('shouldCorrectlySerializeDocumentWithAllTypesInNewContext', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_all_serialization_types_new_context');
var date = new Date();
var scriptCode =
"var string = 'binstring'\n" +
'var bin = new mongo.Binary()\n' +
'for(var index = 0; index < string.length; index++) {\n' +
' bin.put(string.charAt(index))\n' +
'}\n' +
"motherOfAllDocuments['string'] = 'hello';" +
"motherOfAllDocuments['array'] = [1,2,3];" +
"motherOfAllDocuments['hash'] = {'a':1, 'b':2};" +
"motherOfAllDocuments['date'] = date;" +
"motherOfAllDocuments['oid'] = new mongo.ObjectID();" +
"motherOfAllDocuments['binary'] = bin;" +
"motherOfAllDocuments['int'] = 42;" +
"motherOfAllDocuments['float'] = 33.3333;" +
"motherOfAllDocuments['regexp'] = /regexp/;" +
"motherOfAllDocuments['boolean'] = true;" +
"motherOfAllDocuments['long'] = motherOfAllDocuments['date'].getTime();" +
"motherOfAllDocuments['where'] = new mongo.Code('this.a > i', {i:1});" +
"motherOfAllDocuments['dbref'] = new mongo.DBRef('namespace', motherOfAllDocuments['oid'], 'integration_tests_');";
var context = {
motherOfAllDocuments: {},
mongo: {
ObjectID: ObjectID,
Binary: Binary,
Code: Code,
DBRef: DBRef
},
date: date
};
// Execute function in context
Script.runInNewContext(scriptCode, context, 'testScript');
// sys.puts(sys.inspect(context.motherOfAllDocuments))
var motherOfAllDocuments = context.motherOfAllDocuments;
collection.insert(context.motherOfAllDocuments, configuration.writeConcernMax(), function(
err,
docs
) {
test.ok(docs);
collection.findOne(function(err, doc) {
// Assert correct deserialization of the values
test.equal(motherOfAllDocuments.string, doc.string);
test.deepEqual(motherOfAllDocuments.array, doc.array);
test.equal(motherOfAllDocuments.hash.a, doc.hash.a);
test.equal(motherOfAllDocuments.hash.b, doc.hash.b);
test.equal(date.getTime(), doc.long);
test.equal(date.toString(), doc.date.toString());
test.equal(date.getTime(), doc.date.getTime());
test.equal(motherOfAllDocuments.oid.toHexString(), doc.oid.toHexString());
test.equal(motherOfAllDocuments.binary.value(), doc.binary.value());
test.equal(motherOfAllDocuments.int, doc.int);
test.equal(motherOfAllDocuments.long, doc.long);
test.equal(motherOfAllDocuments.float, doc.float);
test.equal(motherOfAllDocuments.regexp.toString(), doc.regexp.toString());
test.equal(motherOfAllDocuments.boolean, doc.boolean);
test.equal(motherOfAllDocuments.where.code, doc.where.code);
test.equal(motherOfAllDocuments.where.scope['i'], doc.where.scope.i);
test.equal(motherOfAllDocuments.dbref.namespace, doc.dbref.namespace);
test.equal(motherOfAllDocuments.dbref.oid.toHexString(), doc.dbref.oid.toHexString());
test.equal(motherOfAllDocuments.dbref.db, doc.dbref.db);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyDoToJsonForLongValue', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_to_json_for_long');
collection.insert(
[{ value: Long.fromNumber(32222432) }],
configuration.writeConcernMax(),
function(err, ids) {
test.ok(ids);
collection.findOne({}, function(err, item) {
test.equal(32222432, item.value);
client.close(done);
});
}
);
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyInsertAndUpdateWithNoCallback', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_insert_and_update_no_callback');
// Insert the update
collection.insert({ i: 1 });
// Update the record
collection.update({ i: 1 }, { $set: { i: 2 } });
// Make sure we leave enough time for mongodb to record the data
setTimeout(function() {
// Locate document
collection.findOne({}, function(err, item) {
test.equal(2, item.i);
client.close(done);
});
}, 100);
});
}
});
/**
* @ignore
*/
it('shouldInsertAndQueryTimestamp', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_insert_and_query_timestamp');
// Insert the update
collection.insert(
{ i: Timestamp.fromNumber(100), j: Long.fromNumber(200) },
configuration.writeConcernMax(),
function(err, r) {
test.ok(r);
// Locate document
collection.findOne({}, function(err, item) {
test.ok(item.i._bsontype === 'Timestamp');
test.equal(100, item.i.toInt());
test.equal(200, item.j);
client.close(done);
});
}
);
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyInsertAndQueryUndefined', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_insert_and_query_undefined');
// Insert the update
collection.insert({ i: undefined }, configuration.writeConcernMax(), function(err, r) {
test.equal(null, err);
test.ok(r);
// Locate document
collection.findOne({}, function(err, item) {
test.equal(null, item.i);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlySerializeDBRefToJSON', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var dbref = new DBRef('foo', ObjectID.createFromHexString('fc24a04d4560531f00000000'), null);
JSON.stringify(dbref);
done();
}
});
/**
* @ignore
*/
it('shouldThrowErrorIfSerializingFunctionOrdered', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_should_throw_error_if_serializing_function');
var func = function() {
return 1;
};
// Insert the update
collection.insert({ i: 1, z: func }, { w: 1, serializeFunctions: true }, function(
err,
result
) {
test.equal(null, err);
collection.findOne({ _id: result.ops[0]._id }, function(err, object) {
test.equal(normalizedFunctionString(func), object.z.code);
test.equal(1, object.i);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldThrowErrorIfSerializingFunctionUnOrdered', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_should_throw_error_if_serializing_function_1');
var func = function() {
return 1;
};
// Insert the update
collection.insert(
{ i: 1, z: func },
{ w: 1, serializeFunctions: true, ordered: false },
function(err, result) {
test.equal(null, err);
collection.findOne({ _id: result.ops[0]._id }, function(err, object) {
test.equal(normalizedFunctionString(func), object.z.code);
test.equal(1, object.i);
client.close(done);
});
}
);
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyInsertDocumentWithUUID', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('insert_doc_with_uuid');
collection.insert(
{ _id: '12345678123456781234567812345678', field: '1' },
configuration.writeConcernMax(),
function(err, result) {
test.equal(null, err);
test.ok(result);
collection
.find({ _id: '12345678123456781234567812345678' })
.toArray(function(err, items) {
test.equal(null, err);
test.equal(items[0]._id, '12345678123456781234567812345678');
test.equal(items[0].field, '1');
// Generate a binary id
var binaryUUID = new Binary(
'00000078123456781234567812345678',
Binary.SUBTYPE_UUID
);
collection.insert(
{ _id: binaryUUID, field: '2' },
configuration.writeConcernMax(),
function(err, result) {
test.equal(null, err);
test.ok(result);
collection.find({ _id: binaryUUID }).toArray(function(err, items) {
test.equal(null, err);
test.equal(items[0].field, '2');
client.close(done);
});
}
);
});
}
);
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyCallCallbackWithDbDriverInStrictMode', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_insert_and_update_no_callback_strict');
collection.insert(
{ _id: '12345678123456781234567812345678', field: '1' },
configuration.writeConcernMax(),
function(err, result) {
test.equal(null, err);
test.ok(result);
collection.update(
{ _id: '12345678123456781234567812345678' },
{ $set: { field: 0 } },
configuration.writeConcernMax(),
function(err, r) {
test.equal(null, err);
test.equal(1, r.result.n);
client.close(done);
}
);
}
);
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyInsertDBRefWithDbNotDefined', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyInsertDBRefWithDbNotDefined');
var doc = { _id: new ObjectID() };
var doc2 = { _id: new ObjectID() };
var doc3 = { _id: new ObjectID() };
collection.insert(doc, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
// Create object with dbref
doc2.ref = new DBRef('shouldCorrectlyInsertDBRefWithDbNotDefined', doc._id);
doc3.ref = new DBRef(
'shouldCorrectlyInsertDBRefWithDbNotDefined',
doc._id,
configuration.db_name
);
collection.insert([doc2, doc3], configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
// Get all items
collection.find().toArray(function(err, items) {
test.equal('shouldCorrectlyInsertDBRefWithDbNotDefined', items[1].ref.namespace);
test.equal(doc._id.toString(), items[1].ref.oid.toString());
test.equal(undefined, items[1].ref.db);
test.equal('shouldCorrectlyInsertDBRefWithDbNotDefined', items[2].ref.namespace);
test.equal(doc._id.toString(), items[2].ref.oid.toString());
test.equal(configuration.db_name, items[2].ref.db);
client.close(done);
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyInsertUpdateRemoveWithNoOptions', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyInsertUpdateRemoveWithNoOptions');
collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.update({ a: 1 }, { a: 2 }, configuration.writeConcernMax(), function(
err,
result
) {
test.equal(null, err);
test.ok(result);
collection.remove({ a: 2 }, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.count(function(err, count) {
test.equal(0, count);
client.close(done);
});
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyExecuteMultipleFetches', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
// Search parameter
var to = 'ralph';
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyExecuteMultipleFetches');
// Execute query
collection.insert(
{ addresses: { localPart: 'ralph' } },
configuration.writeConcernMax(),
function(err, result) {
test.equal(null, err);
test.ok(result);
// Let's find our user
collection.findOne({ 'addresses.localPart': to }, function(err, doc) {
test.equal(null, err);
test.equal(to, doc.addresses.localPart);
client.close(done);
});
}
);
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyFailWhenNoObjectToUpdate', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyFailWhenNoObjectToUpdate');
collection.update(
{ _id: new ObjectID() },
{ email: 'update' },
configuration.writeConcernMax(),
function(err, result) {
test.equal(0, result.result.n);
client.close(done);
}
);
});
}
});
/**
* @ignore
*/
it('Should correctly insert object and retrieve it when containing array and IsoDate', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var doc = {
_id: new ObjectID('4e886e687ff7ef5e00000162'),
str: 'foreign',
type: 2,
timestamp: ISODate('2011-10-02T14:00:08.383Z'),
links: [
'http://www.reddit.com/r/worldnews/comments/kybm0/uk_home_secretary_calls_for_the_scrapping_of_the/'
]
};
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection(
'Should_correctly_insert_object_and_retrieve_it_when_containing_array_and_IsoDate'
);
collection.insert(doc, configuration.writeConcernMax(), function(err, result) {
test.ok(err == null);
test.ok(result);
collection.findOne(function(err, item) {
test.ok(err == null);
test.deepEqual(doc, item);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('Should correctly insert object with timestamps', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var doc = {
_id: new ObjectID('4e886e687ff7ef5e00000162'),
str: 'foreign',
type: 2,
timestamp: new Timestamp(10000),
links: [
'http://www.reddit.com/r/worldnews/comments/kybm0/uk_home_secretary_calls_for_the_scrapping_of_the/'
],
timestamp2: new Timestamp(33333)
};
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('Should_correctly_insert_object_with_timestamps');
collection.insert(doc, configuration.writeConcernMax(), function(err, result) {
test.ok(err == null);
test.ok(result);
collection.findOne(function(err, item) {
test.ok(err == null);
test.deepEqual(doc, item);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('Should fail on insert due to key starting with $', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var doc = {
_id: new ObjectID('4e886e687ff7ef5e00000162'),
$key: 'foreign'
};
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('Should_fail_on_insert_due_to_key_starting_with');
collection.insert(doc, configuration.writeConcernMax(), function(err, result) {
test.ok(err != null);
test.equal(null, result);
client.close(done);
});
});
}
});
/**
* @ignore
*/
it('Should Correctly allow for control of serialization of functions on command level', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var doc = {
str: 'String',
func: function() {}
};
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection(
'Should_Correctly_allow_for_control_of_serialization_of_functions_on_command_level'
);
collection.insert(doc, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.update(
{ str: 'String' },
{ $set: { c: 1, d: function() {} } },
{ w: 1, serializeFunctions: false },
function(err, result) {
test.equal(1, result.result.n);
collection.findOne({ str: 'String' }, function(err, item) {
test.equal(undefined, item.d);
// Execute a safe insert with replication to two servers
collection.findAndModify(
{ str: 'String' },
[['a', 1]],
{ $set: { f: function() {} } },
{ new: true, safe: true, serializeFunctions: true },
function(err, result) {
test.ok(result.value.f._bsontype === 'Code');
client.close(done);
}
);
});
}
);
});
});
}
});
/**
* @ignore
*/
it('Should Correctly allow for control of serialization of functions on collection level', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var doc = {
str: 'String',
func: function() {}
};
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection(
'Should_Correctly_allow_for_control_of_serialization_of_functions_on_collection_level',
{ serializeFunctions: true }
);
collection.insert(doc, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.findOne({ str: 'String' }, function(err, item) {
test.ok(item.func._bsontype === 'Code');
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('Should Correctly allow for using a Date object as _id', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var doc = {
_id: new Date(),
str: 'hello'
};
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('Should_Correctly_allow_for_using_a_Date_object_as__id');
collection.insert(doc, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.findOne({ str: 'hello' }, function(err, item) {
test.ok(item._id instanceof Date);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('Should Correctly fail to update returning 0 results', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('Should_Correctly_fail_to_update_returning_0_results');
collection.update({ a: 1 }, { $set: { a: 1 } }, configuration.writeConcernMax(), function(
err,
r
) {
test.equal(0, r.result.n);
client.close(done);
});
});
}
});
/**
* @ignore
*/
it('Should Correctly update two fields including a sub field', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var doc = {
_id: new ObjectID(),
Prop1: 'p1',
Prop2: 'p2',
More: {
Sub1: 's1',
Sub2: 's2',
Sub3: 's3'
}
};
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('Should_Correctly_update_two_fields_including_a_sub_field');
collection.insert(doc, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
// Update two fields
collection.update(
{ _id: doc._id },
{ $set: { Prop1: 'p1_2', 'More.Sub2': 's2_2' } },
configuration.writeConcernMax(),
function(err, r) {
test.equal(null, err);
test.equal(1, r.result.n);
collection.findOne({ _id: doc._id }, function(err, item) {
test.equal(null, err);
test.equal('p1_2', item.Prop1);
test.equal('s2_2', item.More.Sub2);
client.close(done);
});
}
);
});
});
}
});
/**
* @ignore
*/
it('Should correctly fail due to duplicate key for _id', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection(
'Should_Correctly_update_two_fields_including_a_sub_field_2'
);
collection.insert({ _id: 1 }, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
          // Insert a second document with the same _id to force a duplicate key error
collection.insert({ _id: 1 }, configuration.writeConcernMax(), function(err, r) {
test.equal(r, null);
test.ok(err != null);
test.ok(err.result);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyInsertDocWithCustomId', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyInsertDocWithCustomId');
        // Insert a document with a custom numeric _id
collection.insert({ _id: 0, test: 'hello' }, configuration.writeConcernMax(), function(
err,
result
) {
test.equal(null, err);
test.ok(result);
collection.findOne({ _id: 0 }, function(err, item) {
test.equal(0, item._id);
test.equal('hello', item.test);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyPerformUpsertAgainstNewDocumentAndExistingOne', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection(
'shouldCorrectlyPerformUpsertAgainstNewDocumentAndExistingOne'
);
// Upsert a new doc
collection.update({ a: 1 }, { a: 1 }, { upsert: true, w: 1 }, function(err, result) {
if (result.result.updatedExisting) test.equal(false, result.result.updatedExisting);
test.equal(1, result.result.n);
test.ok(result.result.upserted != null);
// Upsert an existing doc
collection.update({ a: 1 }, { a: 1 }, { upsert: true, w: 1 }, function(err, result) {
if (result.updatedExisting) test.equal(true, result.updatedExisting);
test.equal(1, result.result.n);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyPerformLargeTextInsert', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyPerformLargeTextInsert');
        // Create a large string, insert it and then retrieve it
var string = '';
// Create large text field
for (var i = 0; i < 50000; i++) {
string = string + 'a';
}
collection.insert({ a: 1, string: string }, configuration.writeConcernMax(), function(
err,
result
) {
test.equal(null, err);
test.ok(result);
collection.findOne({ a: 1 }, function(err, doc) {
test.equal(null, err);
test.equal(50000, doc.string.length);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyPerformInsertOfObjectsUsingToBSON', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyPerformInsertOfObjectsUsingToBSON');
// Create document with toBSON method
var doc = { a: 1, b: 1 };
doc.toBSON = function() {
return { c: this.a };
};
collection.insert(doc, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.findOne({ c: 1 }, function(err, doc) {
test.equal(null, err);
test.deepEqual(1, doc.c);
client.close(done);
});
});
});
}
});
/**
* @ignore
*/
it('shouldAttempToForceBsonSize', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: 'single' }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('shouldAttempToForceBsonSize', function(err, collection) {
// var doc = {a:1, b:new Binary(Buffer.alloc(16777216)/5)}
var doc = [
{ a: 1, b: new Binary(Buffer.alloc(16777216 / 3)) },
{ a: 1, b: new Binary(Buffer.alloc(16777216 / 3)) },
{ a: 1, b: new Binary(Buffer.alloc(16777216 / 3)) }
];
collection.insert(doc, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.findOne({ a: 1 }, function(err, doc) {
test.equal(null, err);
test.deepEqual(1, doc.a);
client.close(done);
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyUseCustomObjectToUpdateDocument', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyUseCustomObjectToUpdateDocument');
collection.insert({ a: { b: { c: 1 } } }, configuration.writeConcernMax(), function(
err,
result
) {
test.equal(null, err);
test.ok(result);
// Dynamically build query
var query = {};
query['a'] = {};
query.a['b'] = {};
query.a.b['c'] = 1;
// Update document
collection.update(
query,
{ $set: { 'a.b.d': 1 } },
configuration.writeConcernMax(),
function(err, r) {
test.equal(null, err);
test.equal(1, r.result.n);
client.close(done);
}
);
});
});
}
});
/**
* @ignore
*/
it('shouldExecuteInsertWithNoCallbackAndWriteConcern', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldExecuteInsertWithNoCallbackAndWriteConcern');
collection.insert({ a: { b: { c: 1 } } }).then(
() => {
client.close(done);
},
err => {
client.close(err2 => done(err || err2));
}
);
});
}
});
/**
* @ignore
*/
it('executesCallbackOnceWithOveriddenDefaultDbWriteConcern', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
// The actual test we wish to run
test: function(done) {
function cb(err) {
test.equal(null, err);
client.close(done);
}
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('gh-completely2');
collection.insert({ a: 1 }, { w: 0 }, cb);
});
}
});
/**
* @ignore
*/
it('executesCallbackOnceWithOveriddenDefaultDbWriteConcernWithUpdate', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
// The actual test we wish to run
test: function(done) {
function cb(err) {
test.equal(null, err);
client.close(done);
}
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('gh-completely3');
collection.update({ a: 1 }, { a: 2 }, { upsert: true, w: 0 }, cb);
});
}
});
/**
* @ignore
*/
it('executesCallbackOnceWithOveriddenDefaultDbWriteConcernWithRemove', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
// The actual test we wish to run
test: function(done) {
function cb(err) {
test.equal(null, err);
client.close(done);
}
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('gh-completely1');
collection.remove({ a: 1 }, { w: 0 }, cb);
});
}
});
/**
* @ignore
*/
it('handleBSONTypeInsertsCorrectly', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: {
topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'],
mongodb: '<2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('bson_types_insert');
var document = {
symbol: new Symbol('abcdefghijkl'),
objid: new ObjectID('abcdefghijkl'),
double: new Double(1),
binary: new Binary(Buffer.from('hello world')),
minkey: new MinKey(),
maxkey: new MaxKey(),
code: new Code('function () {}', { a: 55 })
};
collection.insert(document, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.findOne({ symbol: new Symbol('abcdefghijkl') }, function(err, doc) {
test.equal(null, err);
test.equal('abcdefghijkl', doc.symbol.toString());
collection.findOne({ objid: new ObjectID('abcdefghijkl') }, function(err, doc) {
test.equal(null, err);
test.equal('6162636465666768696a6b6c', doc.objid.toString());
collection.findOne({ double: new Double(1) }, function(err, doc) {
test.equal(null, err);
test.equal(1, doc.double);
collection.findOne({ binary: new Binary(Buffer.from('hello world')) }, function(
err,
doc
) {
test.equal(null, err);
test.equal('hello world', doc.binary.toString());
collection.findOne({ minkey: new MinKey() }, function(err, doc) {
test.equal(null, err);
test.ok(doc.minkey._bsontype === 'MinKey');
collection.findOne({ maxkey: new MaxKey() }, function(err, doc) {
test.equal(null, err);
test.ok(doc.maxkey._bsontype === 'MaxKey');
collection.findOne({ code: new Code('function () {}', { a: 55 }) }, function(
err,
doc
) {
test.equal(null, err);
test.ok(doc != null);
client.close(done);
});
});
});
});
});
});
});
});
});
}
});
/**
* @ignore
*/
it('handleBSONTypeInsertsCorrectlyFor28OrHigher', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: {
topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'],
mongodb: '>=2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('bson_types_insert_1');
var document = {
symbol: new Symbol('abcdefghijkl'),
objid: new ObjectID('abcdefghijkl'),
double: new Double(1),
binary: new Binary(Buffer.from('hello world')),
minkey: new MinKey(),
maxkey: new MaxKey(),
code: new Code('function () {}', { a: 55 })
};
collection.insert(document, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.findOne({ symbol: new Symbol('abcdefghijkl') }, function(err, doc) {
test.equal(null, err);
test.equal('abcdefghijkl', doc.symbol.toString());
collection.findOne({ objid: new ObjectID('abcdefghijkl') }, function(err, doc) {
test.equal(null, err);
test.equal('6162636465666768696a6b6c', doc.objid.toString());
collection.findOne({ double: new Double(1) }, function(err, doc) {
test.equal(null, err);
test.equal(1, doc.double);
collection.findOne({ binary: new Binary(Buffer.from('hello world')) }, function(
err,
doc
) {
test.equal(null, err);
test.equal('hello world', doc.binary.toString());
collection.findOne({ minkey: new MinKey() }, function(err, doc) {
test.equal(null, err);
test.ok(doc.minkey._bsontype === 'MinKey');
collection.findOne({ maxkey: new MaxKey() }, function(err, doc) {
test.equal(null, err);
test.ok(doc.maxkey._bsontype === 'MaxKey');
collection.findOne({ code: new Code('function () {}', { a: 55 }) }, function(
err,
doc
) {
test.equal(null, err);
test.ok(doc != null);
client.close(done);
});
});
});
});
});
});
});
});
});
}
});
/**
* @ignore
*/
it('mixedTimestampAndDateQuery', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('timestamp_date');
var d = new Date();
var documents = [{ x: new Timestamp(1, 2) }, { x: d }];
collection.insert(documents, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.findOne({ x: new Timestamp(1, 2) }, function(err, doc) {
test.equal(null, err);
test.ok(doc != null);
collection.findOne({ x: d }, function(err, doc) {
test.equal(null, err);
test.ok(doc != null);
client.close(done);
});
});
});
});
}
});
/**
* @ignore
*/
it('positiveAndNegativeInfinity', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('negative_pos');
var document = {
pos: Number.POSITIVE_INFINITY,
neg: Number.NEGATIVE_INFINITY
};
collection.insert(document, configuration.writeConcernMax(), function(err, result) {
test.equal(null, err);
test.ok(result);
collection.findOne({}, function(err, doc) {
test.equal(null, err);
test.equal(Number.POSITIVE_INFINITY, doc.pos);
test.equal(Number.NEGATIVE_INFINITY, doc.neg);
client.close(done);
});
});
});
}
});
it('shouldCorrectlyInsertSimpleRegExpDocument', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var regexp = /foobar/i;
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_regex', function(err, collection) {
collection.insert({ b: regexp }, configuration.writeConcernMax(), function(err, ids) {
test.equal(null, err);
test.ok(ids);
collection
.find({})
.project({ b: 1 })
.toArray(function(err, items) {
test.equal('' + regexp, '' + items[0].b);
// Let's close the db
client.close(done);
});
});
});
});
}
});
it('shouldCorrectlyInsertSimpleUTF8Regexp', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var regexp = /foobaré/;
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldCorrectlyInsertSimpleUTF8Regexp');
collection.insert({ b: regexp }, configuration.writeConcernMax(), function(err, ids) {
test.equal(null, err);
test.ok(ids);
collection
.find({})
.project({ b: 1 })
.toArray(function(err, items) {
test.equal(null, err);
test.equal('' + regexp, '' + items[0].b);
// Let's close the db
client.close(done);
});
});
});
}
});
it('shouldCorrectlyThrowDueToIllegalCollectionName', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var k = Buffer.alloc(15);
for (var i = 0; i < 15; i++) k[i] = 0;
k.write('hello');
k[6] = 0x06;
k.write('world', 10);
try {
db.collection(k.toString());
test.fail(false);
} catch (err) {} // eslint-disable-line
client.close(done);
});
}
});
it('shouldCorrectlyHonorPromoteLongFalseNativeBSON', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var o = configuration.writeConcernMax();
o.promoteLongs = false;
var client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
promoteLongs: false
});
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('shouldCorrectlyHonorPromoteLong').insert(
{
doc: Long.fromNumber(10),
array: [[Long.fromNumber(10)]]
},
function(err, doc) {
test.equal(null, err);
test.ok(doc);
db.collection('shouldCorrectlyHonorPromoteLong').findOne(function(err, doc) {
test.equal(null, err);
test.ok(doc.doc._bsontype === 'Long');
test.ok(doc.array[0][0]._bsontype === 'Long');
client.close(done);
});
}
);
});
}
});
it('shouldCorrectlyHonorPromoteLongFalseNativeBSONWithGetMore', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var o = configuration.writeConcernMax();
o.promoteLongs = false;
var client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
promoteLongs: false
});
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('shouldCorrectlyHonorPromoteLongFalseNativeBSONWithGetMore').insertMany(
[
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) },
{ a: Long.fromNumber(10) }
],
function(err, doc) {
test.equal(null, err);
test.ok(doc);
db.collection('shouldCorrectlyHonorPromoteLongFalseNativeBSONWithGetMore')
.find({})
.batchSize(2)
.toArray(function(err, docs) {
test.equal(null, err);
var doc = docs.pop();
test.ok(doc.a._bsontype === 'Long');
client.close(done);
});
}
);
});
}
});
it('shouldCorrectlyHonorPromoteLongTrueNativeBSON', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('shouldCorrectlyHonorPromoteLongTrueNativeBSON').insert(
{
doc: Long.fromNumber(10),
array: [[Long.fromNumber(10)]]
},
function(err, doc) {
test.equal(null, err);
test.ok(doc);
db.collection('shouldCorrectlyHonorPromoteLongTrueNativeBSON').findOne(function(
err,
doc
) {
              test.equal(null, err);
              test.equal('number', typeof doc.doc);
              test.equal('number', typeof doc.array[0][0]);
client.close(done);
});
}
);
});
}
});
it('shouldCorrectlyHonorPromoteLongFalseJSBSON', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
promoteLongs: false
});
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('shouldCorrectlyHonorPromoteLongFalseJSBSON').insert(
{
doc: Long.fromNumber(10),
array: [[Long.fromNumber(10)]]
},
function(err, doc) {
test.equal(null, err);
test.ok(doc);
db.collection('shouldCorrectlyHonorPromoteLongFalseJSBSON').findOne(function(err, doc) {
              test.equal(null, err);
test.ok(doc.doc._bsontype === 'Long');
test.ok(doc.array[0][0]._bsontype === 'Long');
client.close(done);
});
}
);
});
}
});
it('shouldCorrectlyHonorPromoteLongTrueJSBSON', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('shouldCorrectlyHonorPromoteLongTrueJSBSON').insert(
{
doc: Long.fromNumber(10),
array: [[Long.fromNumber(10)]]
},
function(err, doc) {
test.equal(null, err);
test.ok(doc);
db.collection('shouldCorrectlyHonorPromoteLongTrueJSBSON').findOne(function(err, doc) {
              test.equal(null, err);
              test.equal('number', typeof doc.doc);
              test.equal('number', typeof doc.array[0][0]);
client.close(done);
});
}
);
});
}
});
it('shouldCorrectlyWorkWithCheckKeys', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('shouldCorrectlyOverrideCheckKeysJSOnUpdate').update(
{
'ps.op.t': 1
},
{ $set: { b: 1 } },
{ checkKeys: false },
function(err, doc) {
test.equal(null, err);
test.ok(doc);
client.close(done);
}
);
});
}
});
it('shouldCorrectlyApplyBitOperator', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var col = db.collection('shouldCorrectlyApplyBitOperator');
col.insert({ a: 1, b: 1 }, function(err, result) {
test.equal(null, err);
test.ok(result);
col.update({ a: 1 }, { $bit: { b: { and: 0 } } }, function(err, result) {
test.equal(null, err);
test.ok(result);
col.findOne({ a: 1 }, function(err, doc) {
test.equal(null, err);
test.equal(1, doc.a);
test.equal(0, doc.b);
client.close(done);
});
});
});
});
}
});
function trim(str) {
return str.replace(/\n/g, '').replace(/ /g, '');
}
it('shouldCorrectlyPerformInsertAndUpdateWithFunctionSerialization', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var col = db.collection('shouldCorrectlyPerformInsertAndUpdateWithFunctionSerialization', {
serializeFunctions: true
});
col.insert(
{
a: 1,
f: function(x) {
return x;
}
},
function(err, doc) {
test.equal(null, err);
test.ok(doc);
col.update(
{ a: 1 },
{
$set: {
f: function(y) {
return y;
}
}
},
function(err, doc) {
test.equal(null, err);
test.ok(doc);
col.findOne({ a: 1 }, function(err, doc) {
test.equal(null, err);
test.equal(trim('function (y){return y;}'), trim(doc.f.code));
client.close(done);
});
}
);
}
);
});
}
});
it('should correctly insert > 1000 docs using insert and insertMany', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var col = db.collection('shouldCorrectlyAllowforMoreThanAThousandDocsInsert', {
serializeFunctions: true
});
var docs = [];
for (var i = 0; i < 2000; i++) {
docs.push({ a: i });
}
col.insert(docs, function(err, doc) {
test.equal(null, err);
test.equal(2000, doc.result.n);
docs = [];
for (var i = 0; i < 2000; i++) {
docs.push({ a: i });
}
col.insertMany(docs, function(err, doc) {
test.equal(null, err);
test.equal(2000, doc.result.n);
client.close(done);
});
});
});
}
});
it('should return error on unordered insertMany with multiple unique key constraints', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Get collection
var col = db.collection('insertManyMultipleWriteErrors');
col.drop(function(err, r) {
expect(r).to.not.exist;
// Create unique index
col.createIndex({ a: 1 }, { unique: true }, function(err, r) {
test.equal(null, err);
test.ok(r);
col.insertMany(
[{ a: 1 }, { a: 2 }, { a: 1 }, { a: 3 }, { a: 1 }],
{ ordered: false },
function(err, r) {
expect(r).to.not.exist;
expect(err).to.exist;
expect(err.result).to.exist;
expect(err.result.getWriteErrors()).to.have.length(2);
client.close(done);
}
);
});
});
});
}
});
it('should return error on unordered insert with multiple unique key constraints', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Get collection
var col = db.collection('insertManyMultipleWriteErrors1');
col.drop(function(err, r) {
expect(r).to.not.exist;
// Create unique index
col.createIndex({ a: 1 }, { unique: true }, function(err, r) {
test.equal(null, err);
test.ok(r);
col.insert(
[{ a: 1 }, { a: 2 }, { a: 1 }, { a: 3 }, { a: 1 }],
{ ordered: false },
function(err, r) {
expect(r).to.not.exist;
expect(err).to.exist;
expect(err.result).to.exist;
expect(err.result.getWriteErrors()).to.have.length(2);
client.close(done);
}
);
});
});
});
}
});
it('should return error on ordered insertMany with multiple unique key constraints', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Get collection
var col = db.collection('insertManyMultipleWriteErrors2');
col.drop(function(/*err, r*/) {
// TODO: reenable once SERVER-36317 is resolved
// expect(r).to.not.exist;
// Create unique index
col.createIndex({ a: 1 }, { unique: true }, function(err, r) {
test.equal(null, err);
test.ok(r);
col.insertMany(
[{ a: 1 }, { a: 2 }, { a: 1 }, { a: 3 }, { a: 1 }],
{ ordered: true },
function(err, r) {
test.equal(r, null);
test.ok(err != null);
test.ok(err.result);
client.close(done);
}
);
});
});
});
}
});
it('should return error on ordered insert with multiple unique key constraints', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Get collection
var col = db.collection('insertManyMultipleWriteErrors3');
col.drop(function(/*err, r*/) {
// TODO: reenable once SERVER-36317 is resolved
// expect(r).to.not.exist;
// Create unique index
col.createIndex({ a: 1 }, { unique: true }, function(err, r) {
test.equal(null, err);
test.ok(r);
col.insert(
[{ a: 1 }, { a: 2 }, { a: 1 }, { a: 3 }, { a: 1 }],
{ ordered: true },
function(err, r) {
test.equal(r, null);
test.ok(err != null);
test.ok(err.result);
client.close(done);
}
);
});
});
});
}
});
it('Correctly allow forceServerObjectId for insertOne', {
metadata: { requires: { topology: ['single'] } },
// The actual test we wish to run
test: function(done) {
var started = [];
var succeeded = [];
var listener = require('../..').instrument(function(err) {
test.equal(null, err);
});
listener.on('started', function(event) {
if (event.commandName === 'insert') started.push(event);
});
listener.on('succeeded', function(event) {
if (event.commandName === 'insert') succeeded.push(event);
});
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(null, err);
db.collection('apm_test')
.insertOne({ a: 1 }, { forceServerObjectId: true })
.then(function() {
test.equal(undefined, started[0].command.documents[0]._id);
listener.uninstrument();
client.close(done);
});
});
}
});
it('Correctly allow forceServerObjectId for insertMany', {
metadata: { requires: { topology: ['single'] } },
// The actual test we wish to run
test: function(done) {
var started = [];
var succeeded = [];
var listener = require('../..').instrument(function(err) {
test.equal(null, err);
});
listener.on('started', function(event) {
if (event.commandName === 'insert') started.push(event);
});
listener.on('succeeded', function(event) {
if (event.commandName === 'insert') succeeded.push(event);
});
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(null, err);
db.collection('apm_test')
.insertMany([{ a: 1 }], { forceServerObjectId: true })
.then(function() {
test.equal(undefined, started[0].command.documents[0]._id);
listener.uninstrument();
client.close(done);
});
});
}
});
it('Correctly allow forceServerObjectId for insertMany', {
metadata: { requires: { topology: ['single'] } },
// The actual test we wish to run
test: function(done) {
var started = [];
var succeeded = [];
var listener = require('../..').instrument(function(err) {
test.equal(null, err);
});
listener.on('started', function(event) {
if (event.commandName === 'insert') started.push(event);
});
listener.on('succeeded', function(event) {
if (event.commandName === 'insert') succeeded.push(event);
});
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(null, err);
db.collection('apm_test')
.insertMany([{ a: 1 }], { forceServerObjectId: true })
.then(function() {
test.equal(undefined, started[0].command.documents[0]._id);
listener.uninstrument();
client.close(done);
});
});
}
});
it('should return correct number of ids for insertMany { ordered: true }', {
metadata: { requires: { topology: ['single'] } },
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(null, err);
db.collection('inserted_ids_test')
.insertMany([{}, {}, {}], { ordered: true })
.then(function(r) {
test.equal(3, Object.keys(r.insertedIds).length);
client.close(done);
});
});
}
});
it('should return correct number of ids for insertMany { ordered: false }', {
metadata: { requires: { topology: ['single'] } },
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(null, err);
db.collection('inserted_ids_test')
.insertMany([{}, {}, {}], { ordered: false })
.then(function(r) {
test.equal(null, err);
test.equal(3, Object.keys(r.insertedIds).length);
client.close(done);
});
});
}
});
it('Insert document including sub documents', {
metadata: { requires: { topology: ['single'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(null, err);
var shipment = {
shipment1: 'a'
};
var supplier = {
shipments: [shipment]
};
var product = {
suppliers: [supplier]
};
var doc = {
a: 1,
products: [product]
};
db.collection('sub_documents').insertOne(doc, function(err, r) {
test.equal(null, err);
test.ok(r);
db.collection('sub_documents')
.find({})
.next(function(err, v) {
test.equal(null, err);
test.equal('a', v.products[0].suppliers[0].shipments[0].shipment1);
client.close(done);
});
});
});
}
});
it('should return result using toJSON', {
metadata: { requires: { topology: ['single'] } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect(function(err, client) {
const db = client.db(configuration.db);
db.collection('to_json').insertOne({ _id: 0 }, (err, result) => {
const jsonResult = result.toJSON();
expect(jsonResult.ok).to.equal(1);
expect(jsonResult.n).to.equal(1);
expect(jsonResult.insertedCount).to.equal(1);
expect(jsonResult.ops).to.deep.equal([{ _id: 0 }]);
expect(jsonResult.insertedId).to.equal(0);
expect(jsonResult.result).to.deep.equal({ n: 1, ok: 1 });
client.close(done);
});
});
}
});
});
| 1 | 17,014 | feel free when editing sections like this to introduce modern features like object destructuring. | mongodb-node-mongodb-native | js |
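A minimal sketch of that suggestion (illustrative only, not part of the committed test file): one of the insertMany assertions above reworked with const, arrow callbacks and object destructuring of the result.

it('should return correct number of ids for insertMany { ordered: true }', {
  metadata: { requires: { topology: ['single'] } },
  test: function(done) {
    // Destructure the configuration instead of "var configuration = this.configuration"
    const { configuration } = this;
    const client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect((err, client) => {
      const db = client.db(configuration.db);
      db.collection('inserted_ids_test')
        .insertMany([{}, {}, {}], { ordered: true })
        .then(({ insertedIds }) => {
          // Destructure insertedIds from the result rather than reaching through r.insertedIds
          test.equal(3, Object.keys(insertedIds).length);
          client.close(done);
        });
    });
  }
});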
@@ -1,3 +1,4 @@
+# coding: utf-8
# Copyright (c) 2006-2015 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2014-2016 Claudiu Popa <[email protected]> | 1 | # Copyright (c) 2006-2015 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2014-2016 Claudiu Popa <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Noam Yorav-Raphael <[email protected]>
# Copyright (c) 2015 Cezar <[email protected]>
# Copyright (c) 2015 James Morgensen <[email protected]>
# Copyright (c) 2016 Moises Lopez - https://www.vauxoo.com/ <[email protected]>
# Copyright (c) 2016 Ashley Whetter <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""imports checkers for Python code"""
import collections
from distutils import sysconfig
import os
import sys
import copy
import six
import astroid
from astroid import are_exclusive
from astroid.modutils import (get_module_part, is_standard_module)
import isort
from pylint.interfaces import IAstroidChecker
from pylint.utils import get_global_option
from pylint.exceptions import EmptyReportError
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
check_messages,
node_ignores_exception,
is_from_fallback_block
)
from pylint.graph import get_cycles, DotBackend
from pylint.reporters.ureports.nodes import VerbatimText, Paragraph
def _qualified_names(modname):
"""Split the names of the given module into subparts
For example,
_qualified_names('pylint.checkers.ImportsChecker')
returns
['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']
"""
names = modname.split('.')
return ['.'.join(names[0:i+1]) for i in range(len(names))]
def _get_import_name(importnode, modname):
"""Get a prepared module name from the given import node
In the case of relative imports, this will return the
absolute qualified module name, which might be useful
for debugging. Otherwise, the initial module name
is returned unchanged.
"""
if isinstance(importnode, astroid.ImportFrom):
if importnode.level:
root = importnode.root()
if isinstance(root, astroid.Module):
modname = root.relative_to_absolute_name(
modname, level=importnode.level)
return modname
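# For example (illustrative module names): for "from ..reporters import text"
# inside the module pylint.checkers.imports, the ImportFrom node has
# modname='reporters' and level=2, so this helper returns the absolute
# name 'pylint.reporters'.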
def _get_first_import(node, context, name, base, level, alias):
"""return the node where [base.]<name> is imported or None if not found
"""
fullname = '%s.%s' % (base, name) if base else name
first = None
found = False
for first in context.body:
if first is node:
continue
if first.scope() is node.scope() and first.fromlineno > node.fromlineno:
continue
if isinstance(first, astroid.Import):
if any(fullname == iname[0] for iname in first.names):
found = True
break
elif isinstance(first, astroid.ImportFrom):
if level == first.level:
for imported_name, imported_alias in first.names:
if fullname == '%s.%s' % (first.modname, imported_name):
found = True
break
if name != '*' and name == imported_name and not (alias or imported_alias):
found = True
break
if found:
break
if found and not are_exclusive(first, node):
return first
return None
def _ignore_import_failure(node, modname, ignored_modules):
for submodule in _qualified_names(modname):
if submodule in ignored_modules:
return True
return node_ignores_exception(node, ImportError)
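# For instance (illustrative names), with ignored_modules containing
# 'third_party', a failing "import third_party.sub" is ignored because
# 'third_party' is one of its qualified name prefixes.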
# utilities to represent import dependencies as a tree and a dot graph ########
def _make_tree_defs(mod_files_list):
"""get a list of 2-uple (module, list_of_files_which_import_this_module),
it will return a dictionary to represent this as a tree
"""
tree_defs = {}
for mod, files in mod_files_list:
node = (tree_defs, ())
for prefix in mod.split('.'):
node = node[0].setdefault(prefix, [{}, []])
node[1] += files
return tree_defs
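# For example (illustrative input):
# _make_tree_defs([('pylint.checkers', ['a.py']), ('pylint', ['b.py'])])
# returns {'pylint': [{'checkers': [{}, ['a.py']]}, ['b.py']]}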
def _repr_tree_defs(data, indent_str=None):
"""return a string which represents imports as a tree"""
lines = []
nodes = data.items()
for i, (mod, (sub, files)) in enumerate(sorted(nodes, key=lambda x: x[0])):
if not files:
files = ''
else:
files = '(%s)' % ','.join(sorted(files))
if indent_str is None:
lines.append('%s %s' % (mod, files))
sub_indent_str = ' '
else:
lines.append(r'%s\-%s %s' % (indent_str, mod, files))
if i == len(nodes)-1:
sub_indent_str = '%s ' % indent_str
else:
sub_indent_str = '%s| ' % indent_str
if sub:
lines.append(_repr_tree_defs(sub, sub_indent_str))
return '\n'.join(lines)
def _dependencies_graph(filename, dep_info):
"""write dependencies as a dot (graphviz) file
"""
done = {}
printer = DotBackend(filename[:-4], rankdir='LR')
printer.emit('URL="." node[shape="box"]')
for modname, dependencies in sorted(six.iteritems(dep_info)):
done[modname] = 1
printer.emit_node(modname)
for depmodname in dependencies:
if depmodname not in done:
done[depmodname] = 1
printer.emit_node(depmodname)
for depmodname, dependencies in sorted(six.iteritems(dep_info)):
for modname in dependencies:
printer.emit_edge(modname, depmodname)
printer.generate(filename)
def _make_graph(filename, dep_info, sect, gtype):
"""generate a dependencies graph and add some information about it in the
report's section
"""
_dependencies_graph(filename, dep_info)
sect.append(Paragraph('%simports graph has been written to %s'
% (gtype, filename)))
# the import checker itself ###################################################
MSGS = {
'E0401': ('Unable to import %s',
'import-error',
'Used when pylint has been unable to import a module.',
{'old_names': [('F0401', 'import-error')]}),
'E0402': ('Attempted relative import beyond top-level package',
'relative-beyond-top-level',
'Used when a relative import tries to access too many levels '
'in the current package.'),
'R0401': ('Cyclic import (%s)',
'cyclic-import',
              'Used when a cyclic import between two or more modules is '
              'detected.'),
'W0401': ('Wildcard import %s',
'wildcard-import',
'Used when `from module import *` is detected.'),
'W0402': ('Uses of a deprecated module %r',
'deprecated-module',
              'Used when a module marked as deprecated is imported.'),
'W0403': ('Relative import %r, should be %r',
'relative-import',
'Used when an import relative to the package directory is '
'detected.',
{'maxversion': (3, 0)}),
'W0404': ('Reimport %r (imported line %s)',
'reimported',
'Used when a module is reimported multiple times.'),
'W0406': ('Module import itself',
'import-self',
'Used when a module is importing itself.'),
    'W0410': ('__future__ import is not the first non-docstring statement',
              'misplaced-future',
              'Python 2.5 and greater require __future__ import to be the '
              'first non-docstring statement in the module.'),
'C0410': ('Multiple imports on one line (%s)',
'multiple-imports',
              'Used when an import statement importing multiple modules is '
'detected.'),
'C0411': ('%s should be placed before %s',
'wrong-import-order',
'Used when PEP8 import order is not respected (standard imports '
'first, then third-party libraries, then local imports)'),
'C0412': ('Imports from package %s are not grouped',
'ungrouped-imports',
'Used when imports are not grouped by packages'),
'C0413': ('Import "%s" should be placed at the top of the '
'module',
'wrong-import-position',
'Used when code and imports are mixed'),
}
DEFAULT_STANDARD_LIBRARY = ()
DEFAULT_KNOWN_THIRD_PARTY = ('enchant',)
class ImportsChecker(BaseChecker):
"""checks for
* external modules dependencies
* relative / wildcard imports
* cyclic imports
* uses of deprecated modules
"""
__implements__ = IAstroidChecker
name = 'imports'
msgs = MSGS
priority = -2
if six.PY2:
deprecated_modules = ('regsub', 'TERMIOS', 'Bastion', 'rexec')
elif sys.version_info < (3, 5):
deprecated_modules = ('optparse', )
else:
deprecated_modules = ('optparse', 'tkinter.tix')
options = (('deprecated-modules',
{'default' : deprecated_modules,
'type' : 'csv',
'metavar' : '<modules>',
'help' : 'Deprecated modules which should not be used,'
' separated by a comma'}
),
('import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of every (i.e. internal and'
' external) dependencies in the given file'
' (report RP0402 must not be disabled)'}
),
('ext-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of external dependencies in the'
' given file (report RP0402 must not be disabled)'}
),
('int-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of internal dependencies in the'
' given file (report RP0402 must not be disabled)'}
),
('known-standard-library',
{'default': DEFAULT_STANDARD_LIBRARY,
'type': 'csv',
'metavar': '<modules>',
'help': 'Force import order to recognize a module as part of'
' the standard compatibility libraries.'}
),
('known-third-party',
{'default': DEFAULT_KNOWN_THIRD_PARTY,
'type': 'csv',
'metavar': '<modules>',
'help': 'Force import order to recognize a module as part of'
' a third party library.'}
),
('analyse-fallback-blocks',
{'default': False,
'type': 'yn',
'metavar': '<y_or_n>',
'help': 'Analyse import fallback blocks. This can be used to '
'support both Python 2 and 3 compatible code, which means that '
'the block might have code that exists only in one or another '
'interpreter, leading to false positives when analysed.'},
),
('allow-wildcard-with-all',
{'default': False,
'type': 'yn',
'metavar': '<y_or_n>',
'help': 'Allow wildcard imports from modules that define __all__.'}),
)
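    # Illustrative pylintrc snippet for the options above (values are examples
    # only, not defaults enforced here):
    #   [IMPORTS]
    #   known-third-party=enchant,requests
    #   analyse-fallback-blocks=no
    #   allow-wildcard-with-all=no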
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self.stats = None
self.import_graph = None
self._imports_stack = []
self._first_non_import_node = None
self.__int_dep_info = self.__ext_dep_info = None
self.reports = (('RP0401', 'External dependencies',
self._report_external_dependencies),
('RP0402', 'Modules dependencies graph',
self._report_dependencies_graph),
)
self._site_packages = self._compute_site_packages()
@staticmethod
def _compute_site_packages():
def _normalized_path(path):
return os.path.normcase(os.path.abspath(path))
paths = set()
real_prefix = getattr(sys, 'real_prefix', None)
for prefix in filter(None, (real_prefix, sys.prefix)):
path = sysconfig.get_python_lib(prefix=prefix)
path = _normalized_path(path)
paths.add(path)
# Handle Debian's derivatives /usr/local.
if os.path.isfile("/etc/debian_version"):
for prefix in filter(None, (real_prefix, sys.prefix)):
libpython = os.path.join(prefix, "local", "lib",
"python" + sysconfig.get_python_version(),
"dist-packages")
paths.add(libpython)
return paths
def open(self):
"""called before visiting project (i.e set of modules)"""
self.linter.add_stats(dependencies={})
self.linter.add_stats(cycles=[])
self.stats = self.linter.stats
self.import_graph = collections.defaultdict(set)
self._excluded_edges = collections.defaultdict(set)
self._ignored_modules = get_global_option(
self, 'ignored-modules', default=[])
def _import_graph_without_ignored_edges(self):
filtered_graph = copy.deepcopy(self.import_graph)
for node in filtered_graph:
filtered_graph[node].difference_update(self._excluded_edges[node])
return filtered_graph
def close(self):
"""called before visiting project (i.e set of modules)"""
if self.linter.is_message_enabled('cyclic-import'):
graph = self._import_graph_without_ignored_edges()
vertices = list(graph)
for cycle in get_cycles(graph, vertices=vertices):
self.add_message('cyclic-import', args=' -> '.join(cycle))
@check_messages(*MSGS.keys())
def visit_import(self, node):
"""triggered when an import statement is seen"""
self._check_reimport(node)
modnode = node.root()
names = [name for name, _ in node.names]
if len(names) >= 2:
self.add_message('multiple-imports', args=', '.join(names), node=node)
for name in names:
self._check_deprecated_module(node, name)
imported_module = self._get_imported_module(node, name)
if isinstance(node.parent, astroid.Module):
                # Only check position for imports at the top level of the module;
                # nested imports are allowed anywhere
self._check_position(node)
if isinstance(node.scope(), astroid.Module):
self._record_import(node, imported_module)
if imported_module is None:
continue
self._check_relative_import(modnode, node, imported_module, name)
self._add_imported_module(node, imported_module.name)
@check_messages(*(MSGS.keys()))
def visit_importfrom(self, node):
"""triggered when a from statement is seen"""
basename = node.modname
imported_module = self._get_imported_module(node, basename)
self._check_misplaced_future(node)
self._check_deprecated_module(node, basename)
self._check_wildcard_imports(node, imported_module)
self._check_same_line_imports(node)
self._check_reimport(node, basename=basename, level=node.level)
if isinstance(node.parent, astroid.Module):
            # Only check position for imports at the top level of the module;
            # nested imports are allowed anywhere
self._check_position(node)
if isinstance(node.scope(), astroid.Module):
self._record_import(node, imported_module)
if imported_module is None:
return
modnode = node.root()
self._check_relative_import(modnode, node, imported_module, basename)
for name, _ in node.names:
if name != '*':
self._add_imported_module(node, '%s.%s' % (imported_module.name, name))
@check_messages(*(MSGS.keys()))
def leave_module(self, node):
# Check imports are grouped by category (standard, 3rd party, local)
std_imports, ext_imports, loc_imports = self._check_imports_order(node)
# Check imports are grouped by package within a given category
met = set()
current_package = None
for import_node, import_name in std_imports + ext_imports + loc_imports:
if not self.linter.is_message_enabled('ungrouped-imports', import_node.fromlineno):
continue
package, _, _ = import_name.partition('.')
if current_package and current_package != package and package in met:
self.add_message('ungrouped-imports', node=import_node,
args=package)
current_package = package
met.add(package)
self._imports_stack = []
self._first_non_import_node = None
def compute_first_non_import_node(self, node):
if not self.linter.is_message_enabled('wrong-import-position', node.fromlineno):
return
# if the node does not contain an import instruction, and if it is the
        # first node of the module, keep track of it (all the import positions
# of the module will be compared to the position of this first
# instruction)
if self._first_non_import_node:
return
if not isinstance(node.parent, astroid.Module):
return
nested_allowed = [astroid.TryExcept, astroid.TryFinally]
is_nested_allowed = [
allowed for allowed in nested_allowed if isinstance(node, allowed)]
if is_nested_allowed and \
any(node.nodes_of_class((astroid.Import, astroid.ImportFrom))):
return
if isinstance(node, astroid.Assign):
# Add compatibility for module level dunder names
# https://www.python.org/dev/peps/pep-0008/#module-level-dunder-names
valid_targets = [
isinstance(target, astroid.AssignName) and
target.name.startswith('__') and target.name.endswith('__')
for target in node.targets]
if all(valid_targets):
return
self._first_non_import_node = node
visit_tryfinally = visit_tryexcept = visit_assignattr = visit_assign = \
visit_ifexp = visit_comprehension = visit_expr = visit_if = \
compute_first_non_import_node
def visit_functiondef(self, node):
if not self.linter.is_message_enabled('wrong-import-position', node.fromlineno):
return
# If it is the first non import instruction of the module, record it.
if self._first_non_import_node:
return
# Check if the node belongs to an `If` or a `Try` block. If they
# contain imports, skip recording this node.
if not isinstance(node.parent.scope(), astroid.Module):
return
root = node
while not isinstance(root.parent, astroid.Module):
root = root.parent
if isinstance(root, (astroid.If, astroid.TryFinally, astroid.TryExcept)):
if any(root.nodes_of_class((astroid.Import, astroid.ImportFrom))):
return
self._first_non_import_node = node
visit_classdef = visit_for = visit_while = visit_functiondef
def _check_misplaced_future(self, node):
basename = node.modname
if basename == '__future__':
# check if this is the first non-docstring statement in the module
prev = node.previous_sibling()
if prev:
# consecutive future statements are possible
if not (isinstance(prev, astroid.ImportFrom)
and prev.modname == '__future__'):
self.add_message('misplaced-future', node=node)
return
def _check_same_line_imports(self, node):
# Detect duplicate imports on the same line.
names = (name for name, _ in node.names)
counter = collections.Counter(names)
for name, count in counter.items():
if count > 1:
self.add_message('reimported', node=node,
args=(name, node.fromlineno))
def _check_position(self, node):
"""Check `node` import or importfrom node position is correct
        Send a message if `node` comes after another (non-import) instruction
"""
# if a first non-import instruction has already been encountered,
# it means the import comes after it and therefore is not well placed
if self._first_non_import_node:
self.add_message('wrong-import-position', node=node,
args=node.as_string())
def _record_import(self, node, importedmodnode):
"""Record the package `node` imports from"""
importedname = importedmodnode.name if importedmodnode else None
if not importedname:
if isinstance(node, astroid.ImportFrom):
importedname = node.modname
else:
importedname = node.names[0][0].split('.')[0]
if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:
            # We need the imported name with the leading dot to detect local package
# Example of node:
# 'from .my_package1 import MyClass1'
# the output should be '.my_package1' instead of 'my_package1'
# Example of node:
# 'from . import my_package2'
# the output should be '.my_package2' instead of '{pyfile}'
importedname = '.' + importedname
self._imports_stack.append((node, importedname))
@staticmethod
def _is_fallback_import(node, imports):
imports = [import_node for (import_node, _) in imports]
return any(astroid.are_exclusive(import_node, node)
for import_node in imports)
def _check_imports_order(self, _module_node):
"""Checks imports of module `node` are grouped by category
Imports must follow this order: standard, 3rd party, local
"""
extern_imports = []
local_imports = []
std_imports = []
extern_not_ignored = []
local_not_ignored = []
isort_obj = isort.SortImports(
file_contents='', known_third_party=self.config.known_third_party,
known_standard_library=self.config.known_standard_library,
)
for node, modname in self._imports_stack:
if modname.startswith('.'):
package = '.' + modname.split('.')[1]
else:
package = modname.split('.')[0]
nested = not isinstance(node.parent, astroid.Module)
ignore_for_import_order = not self.linter.is_message_enabled('wrong-import-order',
node.fromlineno)
import_category = isort_obj.place_module(package)
if import_category in ('FUTURE', 'STDLIB'):
std_imports.append((node, package))
wrong_import = extern_not_ignored or local_not_ignored
if self._is_fallback_import(node, wrong_import):
continue
if wrong_import and not nested:
self.add_message('wrong-import-order', node=node,
args=('standard import "%s"' % node.as_string(),
'"%s"' % wrong_import[0][0].as_string()))
elif import_category in ('FIRSTPARTY', 'THIRDPARTY'):
extern_imports.append((node, package))
if not nested and not ignore_for_import_order:
extern_not_ignored.append((node, package))
wrong_import = local_not_ignored
if wrong_import and not nested:
self.add_message('wrong-import-order', node=node,
args=('external import "%s"' % node.as_string(),
'"%s"' % wrong_import[0][0].as_string()))
elif import_category == 'LOCALFOLDER':
local_imports.append((node, package))
if not nested and not ignore_for_import_order:
local_not_ignored.append((node, package))
return std_imports, extern_imports, local_imports
def _get_imported_module(self, importnode, modname):
try:
return importnode.do_import_module(modname)
except astroid.TooManyLevelsError:
if _ignore_import_failure(importnode, modname, self._ignored_modules):
return None
self.add_message('relative-beyond-top-level', node=importnode)
except astroid.AstroidBuildingException:
if _ignore_import_failure(importnode, modname, self._ignored_modules):
return None
if not self.config.analyse_fallback_blocks and is_from_fallback_block(importnode):
return None
dotted_modname = _get_import_name(importnode, modname)
self.add_message('import-error', args=repr(dotted_modname),
node=importnode)
def _check_relative_import(self, modnode, importnode, importedmodnode,
importedasname):
"""check relative import. node is either an Import or From node, modname
the imported module name.
"""
if not self.linter.is_message_enabled('relative-import'):
return None
if importedmodnode.file is None:
return False # built-in module
if modnode is importedmodnode:
return False # module importing itself
if modnode.absolute_import_activated() or getattr(importnode, 'level', None):
return False
if importedmodnode.name != importedasname:
# this must be a relative import...
self.add_message('relative-import',
args=(importedasname, importedmodnode.name),
node=importnode)
return None
return None
def _add_imported_module(self, node, importedmodname):
"""notify an imported module, used to analyze dependencies"""
module_file = node.root().file
context_name = node.root().name
base = os.path.splitext(os.path.basename(module_file))[0]
# Determine if we have a `from .something import` in a package's
# __init__. This means the module will never be able to import
# itself using this condition (the level will be bigger or
# if the same module is named as the package, it will be different
# anyway).
if isinstance(node, astroid.ImportFrom):
if node.level and node.level > 0 and base == '__init__':
return
try:
importedmodname = get_module_part(importedmodname,
module_file)
except ImportError:
pass
if context_name == importedmodname:
self.add_message('import-self', node=node)
elif not is_standard_module(importedmodname):
# handle dependencies
importedmodnames = self.stats['dependencies'].setdefault(
importedmodname, set())
if context_name not in importedmodnames:
importedmodnames.add(context_name)
# update import graph
self.import_graph[context_name].add(importedmodname)
if not self.linter.is_message_enabled('cyclic-import', line=node.lineno):
self._excluded_edges[context_name].add(importedmodname)
def _check_deprecated_module(self, node, mod_path):
"""check if the module is deprecated"""
for mod_name in self.config.deprecated_modules:
if mod_path == mod_name or mod_path.startswith(mod_name + '.'):
self.add_message('deprecated-module', node=node, args=mod_path)
def _check_reimport(self, node, basename=None, level=None):
"""check if the import is necessary (i.e. not already done)"""
if not self.linter.is_message_enabled('reimported'):
return
frame = node.frame()
root = node.root()
contexts = [(frame, level)]
if root is not frame:
contexts.append((root, None))
for known_context, known_level in contexts:
for name, alias in node.names:
first = _get_first_import(
node, known_context,
name, basename,
known_level, alias)
if first is not None:
self.add_message('reimported', node=node,
args=(name, first.fromlineno))
def _report_external_dependencies(self, sect, _, _dummy):
"""return a verbatim layout for displaying dependencies"""
dep_info = _make_tree_defs(six.iteritems(self._external_dependencies_info()))
if not dep_info:
raise EmptyReportError()
tree_str = _repr_tree_defs(dep_info)
sect.append(VerbatimText(tree_str))
def _report_dependencies_graph(self, sect, _, _dummy):
"""write dependencies as a dot (graphviz) file"""
dep_info = self.stats['dependencies']
if not dep_info or not (self.config.import_graph
or self.config.ext_import_graph
or self.config.int_import_graph):
raise EmptyReportError()
filename = self.config.import_graph
if filename:
_make_graph(filename, dep_info, sect, '')
filename = self.config.ext_import_graph
if filename:
_make_graph(filename, self._external_dependencies_info(),
sect, 'external ')
filename = self.config.int_import_graph
if filename:
_make_graph(filename, self._internal_dependencies_info(),
sect, 'internal ')
def _external_dependencies_info(self):
"""return cached external dependencies information or build and
cache them
"""
if self.__ext_dep_info is None:
package = self.linter.current_name
self.__ext_dep_info = result = {}
for importee, importers in six.iteritems(self.stats['dependencies']):
if not importee.startswith(package):
result[importee] = importers
return self.__ext_dep_info
def _internal_dependencies_info(self):
"""return cached internal dependencies information or build and
cache them
"""
if self.__int_dep_info is None:
package = self.linter.current_name
self.__int_dep_info = result = {}
for importee, importers in six.iteritems(self.stats['dependencies']):
if importee.startswith(package):
result[importee] = importers
return self.__int_dep_info
def _check_wildcard_imports(self, node, imported_module):
wildcard_import_is_allowed = (
self._wildcard_import_is_allowed(imported_module)
)
for name, _ in node.names:
if name == '*' and not wildcard_import_is_allowed:
self.add_message('wildcard-import', args=node.modname, node=node)
def _wildcard_import_is_allowed(self, imported_module):
return (self.config.allow_wildcard_with_all
and imported_module is not None
and '__all__' in imported_module.locals)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ImportsChecker(linter))
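# Illustrative sketch (not part of the original checker module): a module
# layout that the position checks above would flag, assuming a default
# configuration; the names below are made up for demonstration.
#
#     """module docstring"""
#     __version__ = '1.0'   # dunder assignment, still allowed before imports
#     CONSTANT = 1          # recorded as the first non-import node
#     import os             # reported as wrong-import-position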
| 1 | 9,703 | I think the correct pragma is `# -*- coding: utf-8 -*- | PyCQA-pylint | py |
@@ -58,6 +58,9 @@ from scapy.layers.inet import TCP
from scapy.packet import Raw
from scapy.packet import Packet, bind_layers
+from . import iec104, iec104_fields, iec104_information_elements, \
+ iec104_information_objects # noqa: F401
+
IEC_104_IANA_PORT = 2404
# direction - from the central station to the substation | 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Thomas Tannhaeuser <[email protected]>
# This program is published under a GPLv2 license
#
# scapy.contrib.description = IEC-60870-5-104 APCI / APDU layer definitions
# scapy.contrib.status = loads
"""
IEC 60870-5-104
~~~~~~~~~~~~~~~
:description:
This module provides the IEC 60870-5-104 (common short name: iec104)
layer, the information objects and related information element
definitions.
normative references:
- IEC 60870-5-4:1994 (atomic base types / data format)
- IEC 60870-5-101:2003 (information elements (sec. 7.2.6) and
ASDU definition (sec. 7.3))
- IEC 60870-5-104:2006 (information element TSC (sec. 8.8, p. 44))
:TODO:
- add allowed direction to IO attributes
(but this could be derived from the name easily <--> )
- information elements / objects need more testing
          (e.g. on live traffic with comparison against tshark)
:NOTES:
- bit and octet numbering is used as in the related standards
(they usually start with index one instead of zero)
- some of the information objects are only valid for IEC 60870-5-101 -
so usually they should never appear on the network as iec101 uses
          serial connections. I added them in case decoding of those messages
          is needed, e.g. when implementing an iec101<-->iec104 gateway or
          when hitting such a gateway that is not standard conformant (e.g.
          one forwarding 101 messages to a 104 network)
"""
from scapy.compat import orb
from scapy.contrib.scada.iec104.iec104_fields import LEThreeBytesField, \
IEC104SequenceNumber
from scapy.contrib.scada.iec104.iec104_information_objects import \
IEC104_IO_NAMES, IEC104_IO_WITH_IOA_CLASSES, \
IEC104_IO_CLASSES, IEC104_IO_ID_C_RD_NA_1, IEC104_IO_C_RD_NA_1
from scapy.config import conf
from scapy.contrib.scada.iec104.iec104_information_objects import \
IEC104_IO_Packet
from scapy.error import warning, Scapy_Exception
from scapy.fields import ByteField, BitField, ByteEnumField, PacketListField, \
BitEnumField, XByteField, FieldLenField, LEShortField, BitFieldLenField
from scapy.layers.inet import TCP
from scapy.packet import Raw
from scapy.packet import Packet, bind_layers
IEC_104_IANA_PORT = 2404
# direction - from the central station to the substation
IEC104_CONTROL_DIRECTION = 0
IEC104_CENTRAL_2_SUB_DIR = IEC104_CONTROL_DIRECTION
# direction - from the substation to the central station
IEC104_MONITOR_DIRECTION = 1
IEC104_SUB_2_CENTRAL_DIR = IEC104_MONITOR_DIRECTION
IEC104_DIRECTIONS = {
IEC104_MONITOR_DIRECTION: 'monitor direction (sub -> central)',
IEC104_CONTROL_DIRECTION: 'control direction (central -> sub)',
}
# COT - cause of transmission
IEC104_COT_UNDEFINED = 0
IEC104_COT_CYC = 1
IEC104_COT_BACK = 2
IEC104_COT_SPONT = 3
IEC104_COT_INIT = 4
IEC104_COT_REQ = 5
IEC104_COT_ACT = 6
IEC104_COT_ACTCON = 7
IEC104_COT_DEACT = 8
IEC104_COT_DEACTCON = 9
IEC104_COT_ACTTERM = 10
IEC104_COT_RETREM = 11
IEC104_COT_RETLOC = 12
IEC104_COT_FILE = 13
IEC104_COT_RESERVED_14 = 14
IEC104_COT_RESERVED_15 = 15
IEC104_COT_RESERVED_16 = 16
IEC104_COT_RESERVED_17 = 17
IEC104_COT_RESERVED_18 = 18
IEC104_COT_RESERVED_19 = 19
IEC104_COT_INROGEN = 20
IEC104_COT_INRO1 = 21
IEC104_COT_INRO2 = 22
IEC104_COT_INRO3 = 23
IEC104_COT_INRO4 = 24
IEC104_COT_INRO5 = 25
IEC104_COT_INRO6 = 26
IEC104_COT_INRO7 = 27
IEC104_COT_INRO8 = 28
IEC104_COT_INRO9 = 29
IEC104_COT_INRO10 = 30
IEC104_COT_INRO11 = 31
IEC104_COT_INRO12 = 32
IEC104_COT_INRO13 = 33
IEC104_COT_INRO14 = 34
IEC104_COT_INRO15 = 35
IEC104_COT_INRO16 = 36
IEC104_COT_REQCOGEN = 37
IEC104_COT_REQCO1 = 38
IEC104_COT_REQCO2 = 39
IEC104_COT_REQCO3 = 40
IEC104_COT_REQCO4 = 41
IEC104_COT_RESERVED_42 = 42
IEC104_COT_RESERVED_43 = 43
IEC104_COT_UNKNOWN_TYPE_CODE = 44
IEC104_COT_UNKNOWN_TRANSMIT_REASON = 45
IEC104_COT_UNKNOWN_COMMON_ADDRESS_OF_ASDU = 46
IEC104_COT_UNKNOWN_ADDRESS_OF_INFORMATION_OBJECT = 47
IEC104_COT_PRIVATE_48 = 48
IEC104_COT_PRIVATE_49 = 49
IEC104_COT_PRIVATE_50 = 50
IEC104_COT_PRIVATE_51 = 51
IEC104_COT_PRIVATE_52 = 52
IEC104_COT_PRIVATE_53 = 53
IEC104_COT_PRIVATE_54 = 54
IEC104_COT_PRIVATE_55 = 55
IEC104_COT_PRIVATE_56 = 56
IEC104_COT_PRIVATE_57 = 57
IEC104_COT_PRIVATE_58 = 58
IEC104_COT_PRIVATE_59 = 59
IEC104_COT_PRIVATE_60 = 60
IEC104_COT_PRIVATE_61 = 61
IEC104_COT_PRIVATE_62 = 62
IEC104_COT_PRIVATE_63 = 63
CAUSE_OF_TRANSMISSIONS = {
IEC104_COT_UNDEFINED: 'undefined',
IEC104_COT_CYC: 'cyclic (per/cyc)',
IEC104_COT_BACK: 'background (back)',
IEC104_COT_SPONT: 'spontaneous (spont)',
IEC104_COT_INIT: 'initialized (init)',
IEC104_COT_REQ: 'request (req)',
IEC104_COT_ACT: 'activation (act)',
IEC104_COT_ACTCON: 'activation confirmed (actcon)',
IEC104_COT_DEACT: 'activation canceled (deact)',
IEC104_COT_DEACTCON: 'activation cancellation confirmed (deactcon)',
IEC104_COT_ACTTERM: 'activation finished (actterm)',
IEC104_COT_RETREM: 'feedback caused by remote command (retrem)',
IEC104_COT_RETLOC: 'feedback caused by local command (retloc)',
IEC104_COT_FILE: 'file transfer (file)',
IEC104_COT_RESERVED_14: 'reserved_14',
IEC104_COT_RESERVED_15: 'reserved_15',
IEC104_COT_RESERVED_16: 'reserved_16',
IEC104_COT_RESERVED_17: 'reserved_17',
IEC104_COT_RESERVED_18: 'reserved_18',
IEC104_COT_RESERVED_19: 'reserved_19',
IEC104_COT_INROGEN: 'queried by station (inrogen)',
IEC104_COT_INRO1: 'queried by query to group 1 (inro1)',
IEC104_COT_INRO2: 'queried by query to group 2 (inro2)',
IEC104_COT_INRO3: 'queried by query to group 3 (inro3)',
IEC104_COT_INRO4: 'queried by query to group 4 (inro4)',
IEC104_COT_INRO5: 'queried by query to group 5 (inro5)',
IEC104_COT_INRO6: 'queried by query to group 6 (inro6)',
IEC104_COT_INRO7: 'queried by query to group 7 (inro7)',
IEC104_COT_INRO8: 'queried by query to group 8 (inro8)',
IEC104_COT_INRO9: 'queried by query to group 9 (inro9)',
IEC104_COT_INRO10: 'queried by query to group 10 (inro10)',
IEC104_COT_INRO11: 'queried by query to group 11 (inro11)',
IEC104_COT_INRO12: 'queried by query to group 12 (inro12)',
IEC104_COT_INRO13: 'queried by query to group 13 (inro13)',
IEC104_COT_INRO14: 'queried by query to group 14 (inro14)',
IEC104_COT_INRO15: 'queried by query to group 15 (inro15)',
IEC104_COT_INRO16: 'queried by query to group 16 (inro16)',
IEC104_COT_REQCOGEN: 'queried by counter general interrogation (reqcogen)',
IEC104_COT_REQCO1: 'queried by query to counter group 1 (reqco1)',
IEC104_COT_REQCO2: 'queried by query to counter group 2 (reqco2)',
IEC104_COT_REQCO3: 'queried by query to counter group 3 (reqco3)',
IEC104_COT_REQCO4: 'queried by query to counter group 4 (reqco4)',
IEC104_COT_RESERVED_42: 'reserved_42',
IEC104_COT_RESERVED_43: 'reserved_43',
IEC104_COT_UNKNOWN_TYPE_CODE: 'unknown type code',
IEC104_COT_UNKNOWN_TRANSMIT_REASON: 'unknown transmit reason',
IEC104_COT_UNKNOWN_COMMON_ADDRESS_OF_ASDU:
'unknown common address of ASDU',
IEC104_COT_UNKNOWN_ADDRESS_OF_INFORMATION_OBJECT:
'unknown address of information object',
IEC104_COT_PRIVATE_48: 'private_48',
IEC104_COT_PRIVATE_49: 'private_49',
IEC104_COT_PRIVATE_50: 'private_50',
IEC104_COT_PRIVATE_51: 'private_51',
IEC104_COT_PRIVATE_52: 'private_52',
IEC104_COT_PRIVATE_53: 'private_53',
IEC104_COT_PRIVATE_54: 'private_54',
IEC104_COT_PRIVATE_55: 'private_55',
IEC104_COT_PRIVATE_56: 'private_56',
IEC104_COT_PRIVATE_57: 'private_57',
IEC104_COT_PRIVATE_58: 'private_58',
IEC104_COT_PRIVATE_59: 'private_59',
IEC104_COT_PRIVATE_60: 'private_60',
IEC104_COT_PRIVATE_61: 'private_61',
IEC104_COT_PRIVATE_62: 'private_62',
IEC104_COT_PRIVATE_63: 'private_63'
}
IEC104_APDU_TYPE_UNKNOWN = 0x00
IEC104_APDU_TYPE_I_SEQ_IOA = 0x01
IEC104_APDU_TYPE_I_SINGLE_IOA = 0x02
IEC104_APDU_TYPE_U = 0x03
IEC104_APDU_TYPE_S = 0x04
def _iec104_apci_type_from_packet(data):
"""
the type of the message is encoded in octet 1..4
                  oct 1, bit 1   oct 1, bit 2   oct 3, bit 1
    I Message          0              1|0             0
    S Message          1               0              0
    U Message          1               1              0
see EN 60870-5-104:2006, sec. 5 (p. 13, fig. 6,7,8)
"""
oct_1 = orb(data[2])
oct_3 = orb(data[4])
oct_1_bit_1 = bool(oct_1 & 1)
oct_1_bit_2 = bool(oct_1 & 2)
oct_3_bit_1 = bool(oct_3 & 1)
if oct_1_bit_1 is False and oct_3_bit_1 is False:
if len(data) < 8:
return IEC104_APDU_TYPE_UNKNOWN
is_seq_ioa = ((orb(data[7]) & 0x80) == 0x80)
if is_seq_ioa:
return IEC104_APDU_TYPE_I_SEQ_IOA
else:
return IEC104_APDU_TYPE_I_SINGLE_IOA
if oct_1_bit_1 and oct_1_bit_2 is False and oct_3_bit_1 is False:
return IEC104_APDU_TYPE_S
if oct_1_bit_1 and oct_1_bit_2 and oct_3_bit_1 is False:
return IEC104_APDU_TYPE_U
return IEC104_APDU_TYPE_UNKNOWN
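# Hedged illustration (not part of the original module): the control field
# decoding above applied to two hypothetical raw frames; the byte strings are
# assumptions made up for demonstration.
#
#     _iec104_apci_type_from_packet(b'\x68\x04\x01\x00\x02\x00')
#     # -> IEC104_APDU_TYPE_S (octet 1 = 0x01: bit 1 set, bit 2 clear)
#     _iec104_apci_type_from_packet(b'\x68\x04\x07\x00\x00\x00')
#     # -> IEC104_APDU_TYPE_U (octet 1 = 0x07: bits 1 and 2 set)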
class IEC104_APDU(Packet):
"""
basic Application Protocol Data Unit definition used by S/U/I messages
"""
def guess_payload_class(self, payload):
payload_len = len(payload)
if payload_len < 6:
return self.default_payload_class(payload)
if orb(payload[0]) != 0x68:
self.default_payload_class(payload)
# the length field contains the number of bytes starting from the
# first control octet
apdu_length = 2 + orb(payload[1])
if payload_len < apdu_length:
warning(
'invalid len of APDU. given len: {} available len: {}'.format(
apdu_length, payload_len))
return self.default_payload_class(payload)
apdu_type = _iec104_apci_type_from_packet(payload)
return IEC104_APDU_CLASSES.get(apdu_type,
self.default_payload_class(payload))
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
"""
detect type of the message by checking packet data
:param _pkt: raw bytes of the packet layer data to be checked
:param args: unused
:param kargs: unused
:return: class of the detected message type
"""
if _iec104_is_i_apdu_seq_ioa(_pkt):
return IEC104_I_Message_SeqIOA
if _iec104_is_i_apdu_single_ioa(_pkt):
return IEC104_I_Message_SingleIOA
if _iec104_is_u_apdu(_pkt):
return IEC104_U_Message
if _iec104_is_s_apdu(_pkt):
return IEC104_S_Message
return Raw
class IEC104_S_Message(IEC104_APDU):
"""
message used for ack of received I-messages
"""
name = 'IEC-104 S APDU'
fields_desc = [
XByteField('start', 0x68),
ByteField("apdu_length", 4),
ByteField('octet_1', 0x01),
ByteField('octet_2', 0),
IEC104SequenceNumber('rx_seq_num', 0),
]
class IEC104_U_Message(IEC104_APDU):
"""
message used for connection tx control (start/stop) and monitoring (test)
"""
name = 'IEC-104 U APDU'
fields_desc = [
XByteField('start', 0x68),
ByteField("apdu_length", 4),
BitField('testfr_con', 0, 1),
BitField('testfr_act', 0, 1),
BitField('stopdt_con', 0, 1),
BitField('stopdt_act', 0, 1),
BitField('startdt_con', 0, 1),
BitField('startdt_act', 0, 1),
BitField('octet_1_1_2', 3, 2),
ByteField('octet_2', 0),
ByteField('octet_3', 0),
ByteField('octet_4', 0)
]
def _i_msg_io_dispatcher_sequence(pkt, next_layer_data):
"""
get the type id and return the matching ASDU instance
"""
next_layer_class_type = IEC104_IO_CLASSES.get(pkt.type_id, conf.raw_layer)
return next_layer_class_type(next_layer_data)
def _i_msg_io_dispatcher_single(pkt, next_layer_data):
"""
get the type id and return the matching ASDU instance
(information object address + regular ASDU information object fields)
"""
next_layer_class_type = IEC104_IO_WITH_IOA_CLASSES.get(pkt.type_id,
conf.raw_layer)
return next_layer_class_type(next_layer_data)
class IEC104ASDUPacketListField(PacketListField):
"""
used to add a list of information objects to an I-message
"""
def m2i(self, pkt, m):
"""
add calling layer instance to the cls()-signature
:param pkt: calling layer instance
:param m: raw data forming the next layer
:return: instance of the class representing the next layer
"""
return self.cls(pkt, m)
class IEC104_I_Message_StructureException(Scapy_Exception):
"""
Exception raised if payload is not of type Information Object
"""
pass
class IEC104_I_Message(IEC104_APDU):
"""
message used for transmitting data (APDU - Application Protocol Data Unit)
APDU: MAGIC + APCI + ASDU
MAGIC: 0x68
APCI : Control Information (rx/tx seq/ack numbers)
ASDU : Application Service Data Unit - information object related data
see EN 60870-5-104:2006, sec. 5 (p. 12)
"""
name = 'IEC-104 I APDU'
IEC_104_MAGIC = 0x68 # dec -> 104
SQ_FLAG_SINGLE = 0
SQ_FLAG_SEQUENCE = 1
SQ_FLAGS = {
SQ_FLAG_SINGLE: 'single',
SQ_FLAG_SEQUENCE: 'sequence'
}
TEST_DISABLED = 0
TEST_ENABLED = 1
TEST_FLAGS = {
TEST_DISABLED: 'disabled',
TEST_ENABLED: 'enabled'
}
ACK_POSITIVE = 0
ACK_NEGATIVE = 1
ACK_FLAGS = {
ACK_POSITIVE: 'positive',
ACK_NEGATIVE: 'negative'
}
fields_desc = []
def __init__(self, _pkt=b"", post_transform=None, _internal=0,
_underlayer=None, **fields):
super(IEC104_I_Message, self).__init__(_pkt=_pkt,
post_transform=post_transform,
_internal=_internal,
_underlayer=_underlayer,
**fields)
if 'io' in fields and fields['io']:
self._information_object_update(fields['io'])
def _information_object_update(self, io_instances):
"""
set the type_id in the ASDU header based on the given information
object (io) and check for valid structure
:param io_instances: information object
"""
if not isinstance(io_instances, list):
io_instances = [io_instances]
first_io = io_instances[0]
first_io_class = first_io.__class__
if not issubclass(first_io_class, IEC104_IO_Packet):
raise IEC104_I_Message_StructureException(
'information object payload must be a subclass of '
'IEC104_IO_Packet')
self.type_id = first_io.iec104_io_type_id()
# ensure all io elements within the ASDU share the same class type
for io_inst in io_instances[1:]:
if io_inst.__class__ != first_io_class:
raise IEC104_I_Message_StructureException(
'each information object within the ASDU must be of '
'the same class type (first io: {}, '
'current io: {})'.format(first_io_class._name,
io_inst._name))
class IEC104_I_Message_SeqIOA(IEC104_I_Message):
"""
all information objects share a base information object address field
sq = 1, see EN 60870-5-101:2003, sec. 7.2.2.1 (p. 33)
"""
name = 'IEC-104 I APDU (Seq IOA)'
fields_desc = [
# APCI
XByteField('start', IEC104_I_Message.IEC_104_MAGIC),
FieldLenField("apdu_length", None, fmt="!B", length_of='io',
adjust=lambda pkt, x: x + 13),
IEC104SequenceNumber('tx_seq_num', 0),
IEC104SequenceNumber('rx_seq_num', 0),
# ASDU
ByteEnumField('type_id', 0, IEC104_IO_NAMES),
BitEnumField('sq', IEC104_I_Message.SQ_FLAG_SEQUENCE, 1,
IEC104_I_Message.SQ_FLAGS),
BitFieldLenField('num_io', None, 7, count_of='io'),
BitEnumField('test', 0, 1, IEC104_I_Message.TEST_FLAGS),
BitEnumField('ack', 0, 1, IEC104_I_Message.ACK_FLAGS),
BitEnumField('cot', 0, 6, CAUSE_OF_TRANSMISSIONS),
ByteField('origin_address', 0),
LEShortField('common_asdu_address', 0),
LEThreeBytesField('information_object_address', 0),
IEC104ASDUPacketListField('io',
conf.raw_layer(),
_i_msg_io_dispatcher_sequence,
length_from=lambda pkt: pkt.apdu_length - 13)
]
def post_dissect(self, s):
if self.type_id == IEC104_IO_ID_C_RD_NA_1:
# IEC104_IO_ID_C_RD_NA_1 has no payload. we will add the layer
# manually to the stack right now. we do this num_io times
# as - even if it makes no sense - someone could decide
# to add more than one read commands in a sequence...
setattr(self, 'io', [IEC104_IO_C_RD_NA_1()] * self.num_io)
return s
class IEC104_I_Message_SingleIOA(IEC104_I_Message):
"""
every information object contains an individual information object
address field
sq = 0, see EN 60870-5-101:2003, sec. 7.2.2.1 (p. 33)
"""
name = 'IEC-104 I APDU (single IOA)'
fields_desc = [
# APCI
XByteField('start', IEC104_I_Message.IEC_104_MAGIC),
FieldLenField("apdu_length", None, fmt="!B", length_of='io',
adjust=lambda pkt, x: x + 10),
IEC104SequenceNumber('tx_seq_num', 0),
IEC104SequenceNumber('rx_seq_num', 0),
# ASDU
ByteEnumField('type_id', 0, IEC104_IO_NAMES),
BitEnumField('sq', IEC104_I_Message.SQ_FLAG_SINGLE, 1,
IEC104_I_Message.SQ_FLAGS),
BitFieldLenField('num_io', None, 7, count_of='io'),
BitEnumField('test', 0, 1, IEC104_I_Message.TEST_FLAGS),
BitEnumField('ack', 0, 1, IEC104_I_Message.ACK_FLAGS),
BitEnumField('cot', 0, 6, CAUSE_OF_TRANSMISSIONS),
ByteField('origin_address', 0),
LEShortField('common_asdu_address', 0),
IEC104ASDUPacketListField('io',
conf.raw_layer(),
_i_msg_io_dispatcher_single,
length_from=lambda pkt: pkt.apdu_length - 10)
]
IEC104_APDU_CLASSES = {
IEC104_APDU_TYPE_UNKNOWN: conf.raw_layer,
IEC104_APDU_TYPE_I_SEQ_IOA: IEC104_I_Message_SeqIOA,
IEC104_APDU_TYPE_I_SINGLE_IOA: IEC104_I_Message_SingleIOA,
IEC104_APDU_TYPE_U: IEC104_U_Message,
IEC104_APDU_TYPE_S: IEC104_S_Message
}
def _iec104_is_i_apdu_seq_ioa(payload):
len_payload = len(payload)
if len_payload < 6:
return False
if orb(payload[0]) != 0x68 or (
orb(payload[1]) + 2) > len_payload or len_payload < 8:
return False
return IEC104_APDU_TYPE_I_SEQ_IOA == _iec104_apci_type_from_packet(payload)
def _iec104_is_i_apdu_single_ioa(payload):
len_payload = len(payload)
if len_payload < 6:
return False
if orb(payload[0]) != 0x68 or (
orb(payload[1]) + 2) > len_payload or len_payload < 8:
return False
return IEC104_APDU_TYPE_I_SINGLE_IOA == _iec104_apci_type_from_packet(
payload)
def _iec104_is_u_apdu(payload):
if len(payload) < 6:
return False
if orb(payload[0]) != 0x68 or orb(payload[1]) != 4:
return False
return IEC104_APDU_TYPE_U == _iec104_apci_type_from_packet(payload)
def _iec104_is_s_apdu(payload):
if len(payload) < 6:
return False
if orb(payload[0]) != 0x68 or orb(payload[1]) != 4:
return False
return IEC104_APDU_TYPE_S == _iec104_apci_type_from_packet(payload)
def iec104_decode(payload):
"""
can be used to dissect payload of a TCP connection
:param payload: the application layer data (IEC104-APDU(s))
:return: iec104 (I/U/S) message instance, conf.raw_layer() if unknown
"""
if _iec104_is_i_apdu_seq_ioa(payload):
return IEC104_I_Message_SeqIOA(payload)
elif _iec104_is_i_apdu_single_ioa(payload):
return IEC104_I_Message_SingleIOA(payload)
elif _iec104_is_s_apdu(payload):
return IEC104_S_Message(payload)
elif _iec104_is_u_apdu(payload):
return IEC104_U_Message(payload)
else:
return conf.raw_layer(payload)
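# Hedged usage sketch (not part of the original contrib module): feeding a
# hypothetical S-frame to iec104_decode(). The frame bytes are an assumption
# chosen only for illustration: start octet 0x68, APDU length 4, control
# octet 1 = 0x01 (S message), acknowledging receive sequence number 1.
if __name__ == '__main__':
    _example_s_frame = b'\x68\x04\x01\x00\x02\x00'
    _decoded = iec104_decode(_example_s_frame)
    _decoded.show()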
bind_layers(TCP, IEC104_APDU, sport=IEC_104_IANA_PORT)
bind_layers(TCP, IEC104_APDU, dport=IEC_104_IANA_PORT)
| 1 | 16,067 | This fixes some import errors, due to the fact that this file has the same name than its module. | secdev-scapy | py |
@@ -21,8 +21,9 @@ type Folder struct {
fs *FS
list *FolderList
- handleMu sync.RWMutex
- h *libkbfs.TlfHandle
+ handleMu sync.RWMutex
+ h *libkbfs.TlfHandle
+ hPreferredName string
folderBranchMu sync.Mutex
folderBranch libkbfs.FolderBranch | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libdokan
import (
"fmt"
"strings"
"sync"
"time"
"github.com/keybase/kbfs/dokan"
"github.com/keybase/kbfs/libfs"
"github.com/keybase/kbfs/libkbfs"
"golang.org/x/net/context"
)
// Folder represents KBFS top-level folders
type Folder struct {
fs *FS
list *FolderList
handleMu sync.RWMutex
h *libkbfs.TlfHandle
folderBranchMu sync.Mutex
folderBranch libkbfs.FolderBranch
// Protects the nodes map.
mu sync.Mutex
// Map KBFS nodes to FUSE nodes, to be able to handle multiple
// lookups and incoming change notifications. A node is present
// here if the kernel holds a reference to it.
//
// If we ever support hardlinks, this would need refcounts.
//
// Children must call folder.forgetChildLocked on receiving the
// FUSE Forget request.
nodes map[libkbfs.NodeID]dokan.File
// Protects the updateChan.
updateMu sync.Mutex
// updateChan is non-nil when the user disables updates via the
// file system. Sending a struct{}{} on this channel will unpause
// the updates.
updateChan chan<- struct{}
// noForget is turned on when the folder may not be forgotten
// because it has attached special file state with it.
noForget bool
}
func newFolder(fl *FolderList, h *libkbfs.TlfHandle) *Folder {
f := &Folder{
fs: fl.fs,
list: fl,
h: h,
nodes: map[libkbfs.NodeID]dokan.File{},
}
return f
}
func (f *Folder) name() libkbfs.CanonicalTlfName {
f.handleMu.RLock()
defer f.handleMu.RUnlock()
return f.h.GetCanonicalName()
}
func (f *Folder) setFolderBranch(folderBranch libkbfs.FolderBranch) error {
f.folderBranchMu.Lock()
defer f.folderBranchMu.Unlock()
// TODO unregister all at unmount
err := f.list.fs.config.Notifier().RegisterForChanges(
[]libkbfs.FolderBranch{folderBranch}, f)
if err != nil {
return err
}
f.folderBranch = folderBranch
return nil
}
func (f *Folder) unsetFolderBranch(ctx context.Context) {
f.folderBranchMu.Lock()
defer f.folderBranchMu.Unlock()
if f.folderBranch == (libkbfs.FolderBranch{}) {
// Wasn't set.
return
}
err := f.list.fs.config.Notifier().UnregisterFromChanges([]libkbfs.FolderBranch{f.folderBranch}, f)
if err != nil {
f.fs.log.Info("cannot unregister change notifier for folder %q: %v",
f.name(), err)
}
f.folderBranch = libkbfs.FolderBranch{}
}
func (f *Folder) getFolderBranch() libkbfs.FolderBranch {
f.folderBranchMu.Lock()
defer f.folderBranchMu.Unlock()
return f.folderBranch
}
// forgetNode forgets a formerly active child with basename name.
func (f *Folder) forgetNode(ctx context.Context, node libkbfs.Node) {
f.mu.Lock()
defer f.mu.Unlock()
delete(f.nodes, node.GetID())
if len(f.nodes) == 0 && !f.noForget {
f.unsetFolderBranch(ctx)
f.list.forgetFolder(string(f.name()))
}
}
func (f *Folder) reportErr(ctx context.Context,
mode libkbfs.ErrorModeType, err error) {
if err == nil {
f.fs.log.CDebugf(ctx, "Request complete")
return
}
f.fs.config.Reporter().ReportErr(ctx, f.name(), f.list.public, mode, err)
// We just log the error as debug, rather than error, because it
// might just indicate an expected error such as an ENOENT.
//
// TODO: Classify errors and escalate the logging level of the
// important ones.
f.fs.log.CDebugf(ctx, err.Error())
}
func (f *Folder) lockedAddNode(node libkbfs.Node, val dokan.File) {
f.mu.Lock()
f.nodes[node.GetID()] = val
f.mu.Unlock()
}
// LocalChange is called for changes originating within this process.
func (f *Folder) LocalChange(ctx context.Context, node libkbfs.Node, write libkbfs.WriteRange) {
f.fs.queueNotification(func() {})
}
// BatchChanges is called for changes originating anywhere, including
// other hosts.
func (f *Folder) BatchChanges(ctx context.Context, changes []libkbfs.NodeChange) {
f.fs.queueNotification(func() {})
}
// TlfHandleChange is called when the name of a folder changes.
func (f *Folder) TlfHandleChange(ctx context.Context,
newHandle *libkbfs.TlfHandle) {
// Handle in the background because we shouldn't lock during
// the notification
f.fs.queueNotification(func() {
oldName := func() libkbfs.CanonicalTlfName {
f.handleMu.Lock()
defer f.handleMu.Unlock()
oldName := f.h.GetCanonicalName()
f.h = newHandle
return oldName
}()
f.list.updateTlfName(ctx, string(oldName),
string(newHandle.GetCanonicalName()))
})
}
func (f *Folder) resolve(ctx context.Context) (*libkbfs.TlfHandle, error) {
// In case there were any unresolved assertions, try them again on
// the first load. Otherwise, since we haven't subscribed to
// updates yet for this folder, we might have missed a name
// change.
handle, err := f.h.ResolveAgain(ctx, f.fs.config.KBPKI())
if err != nil {
return nil, err
}
eq, err := f.h.Equals(f.fs.config.Codec(), *handle)
if err != nil {
return nil, err
}
if !eq {
// Make sure the name changes in the folder and the folder list
f.TlfHandleChange(ctx, handle)
}
return handle, nil
}
// Dir represents KBFS subdirectories.
type Dir struct {
FSO
}
func newDir(folder *Folder, node libkbfs.Node, name string, parent libkbfs.Node) *Dir {
d := &Dir{FSO{
name: name,
parent: parent,
folder: folder,
node: node,
}}
d.refcount.Increase()
return d
}
// GetFileInformation for dokan.
func (d *Dir) GetFileInformation(ctx context.Context, fi *dokan.FileInfo) (st *dokan.Stat, err error) {
d.folder.fs.logEnter(ctx, "Dir GetFileInformation")
defer func() { d.folder.reportErr(ctx, libkbfs.ReadMode, err) }()
return eiToStat(d.folder.fs.config.KBFSOps().Stat(ctx, d.node))
}
// SetFileAttributes for Dokan.
func (d *Dir) SetFileAttributes(ctx context.Context, fi *dokan.FileInfo, fileAttributes dokan.FileAttribute) error {
d.folder.fs.logEnter(ctx, "Dir SetFileAttributes")
// TODO handle attributes for real.
return nil
}
// isNoSuchNameError checks for libkbfs.NoSuchNameError.
func isNoSuchNameError(err error) bool {
_, ok := err.(libkbfs.NoSuchNameError)
return ok
}
// lastStr returns last string in a string slice or "" if the slice is empty.
func lastStr(strs []string) string {
if len(strs) == 0 {
return ""
}
return strs[len(strs)-1]
}
// open tries to open a file.
func (d *Dir) open(ctx context.Context, oc *openContext, path []string) (dokan.File, bool, error) {
d.folder.fs.log.CDebugf(ctx, "Dir openDir %v", path)
specialNode := handleTLFSpecialFile(lastStr(path), d.folder)
if specialNode != nil {
return oc.returnFileNoCleanup(specialNode)
}
origPath := path
rootDir := d
for len(path) > 0 {
// Handle upper case filenames from junctions etc
if c := lowerTranslateCandidate(oc, path[0]); c != "" {
var hit string
var nhits int
d.FindFiles(ctx, nil, c, func(ns *dokan.NamedStat) error {
if strings.ToLower(ns.Name) == c {
hit = ns.Name
nhits++
}
return nil
})
if nhits != 1 {
return nil, false, dokan.ErrObjectNameNotFound
}
path[0] = hit
}
leaf := len(path) == 1
// Check if this is a per-file metainformation file, if so
// return the corresponding SpecialReadFile.
if leaf && strings.HasPrefix(path[0], libfs.FileInfoPrefix) {
if err := oc.ReturningFileAllowed(); err != nil {
return nil, false, err
}
node, _, err := d.folder.fs.config.KBFSOps().Lookup(ctx, d.node, path[0][len(libfs.FileInfoPrefix):])
if err != nil {
return nil, false, err
}
nmd, err := d.folder.fs.config.KBFSOps().GetNodeMetadata(ctx, node)
if err != nil {
return nil, false, err
}
return &SpecialReadFile{read: fileInfo(nmd).read, fs: d.folder.fs}, false, nil
}
newNode, de, err := d.folder.fs.config.KBFSOps().Lookup(ctx, d.node, path[0])
// If we are in the final component, check if it is a creation.
if leaf {
notFound := isNoSuchNameError(err)
switch {
case notFound && oc.isCreateDirectory():
return d.mkdir(ctx, oc, path[0])
case notFound && oc.isCreation():
return d.create(ctx, oc, path[0])
case !notFound && oc.isExistingError():
return nil, false, dokan.ErrFileAlreadyExists
}
}
// Return errors from Lookup
if err != nil {
return nil, false, err
}
if newNode != nil {
d.folder.mu.Lock()
f, _ := d.folder.nodes[newNode.GetID()]
d.folder.mu.Unlock()
// Symlinks don't have stored nodes, so they are impossible here.
switch x := f.(type) {
default:
return nil, false, fmt.Errorf("unhandled node type: %T", f)
case nil:
case *File:
if err := oc.ReturningFileAllowed(); err != nil {
return nil, false, err
}
x.refcount.Increase()
return openFile(ctx, oc, path, x)
case *Dir:
d = x
path = path[1:]
continue
}
}
switch de.Type {
default:
return nil, false, fmt.Errorf("unhandled entry type: %v", de.Type)
case libkbfs.File, libkbfs.Exec:
if err := oc.ReturningFileAllowed(); err != nil {
return nil, false, err
}
child := newFile(d.folder, newNode, path[0], d.node)
f, _, err := openFile(ctx, oc, path, child)
if err == nil {
d.folder.lockedAddNode(newNode, child)
}
return f, false, err
case libkbfs.Dir:
child := newDir(d.folder, newNode, path[0], d.node)
d.folder.lockedAddNode(newNode, child)
d = child
path = path[1:]
case libkbfs.Sym:
return openSymlink(ctx, oc, d, rootDir, origPath, path, de.SymPath)
}
}
if err := oc.ReturningDirAllowed(); err != nil {
return nil, false, err
}
d.refcount.Increase()
return d, true, nil
}
type fileInfo libkbfs.NodeMetadata
func (fi fileInfo) read(ctx context.Context) ([]byte, time.Time, error) {
bs, err := libfs.PrettyJSON(fi)
return bs, time.Time{}, err
}
func openFile(ctx context.Context, oc *openContext, path []string, f *File) (dokan.File, bool, error) {
var err error
// Files only allowed as leafs...
if len(path) > 1 {
return nil, false, dokan.ErrObjectNameNotFound
}
if oc.isTruncate() {
err = f.folder.fs.config.KBFSOps().Truncate(ctx, f.node, 0)
}
if err != nil {
return nil, false, err
}
return f, false, nil
}
func openSymlink(ctx context.Context, oc *openContext, parent *Dir, rootDir *Dir, origPath, path []string, target string) (dokan.File, bool, error) {
// TODO handle file/directory type flags here from CreateOptions.
if !oc.reduceRedirectionsLeft() {
return nil, false, dokan.ErrObjectNameNotFound
}
// Take relevant prefix of original path.
origPath = origPath[:len(origPath)-len(path)]
if len(path) == 1 && oc.isOpenReparsePoint() {
// a Symlink is never included in Folder.nodes, as it doesn't
// have a libkbfs.Node to keep track of renames.
		// Here we may get an error if the symlink destination does not exist,
		// which is fine; treat such non-existing targets as symlinks to a file.
isDir, err := resolveSymlinkIsDir(ctx, oc, rootDir, origPath, target)
parent.folder.fs.log.CDebugf(ctx, "openSymlink leaf returned %v,%v => %v,%v", origPath, target, isDir, err)
return &Symlink{parent: parent, name: path[0], isTargetADirectory: isDir}, isDir, nil
}
// reference symlink, symbolic links always use '/' instead of '\'.
if target == "" || target[0] == '/' {
return nil, false, dokan.ErrNotSupported
}
dst, err := resolveSymlinkPath(ctx, origPath, target)
parent.folder.fs.log.CDebugf(ctx, "openSymlink resolve returned %v,%v => %v,%v", origPath, target, dst, err)
if err != nil {
return nil, false, err
}
dst = append(dst, path[1:]...)
return rootDir.open(ctx, oc, dst)
}
func getExclFromOpenContext(oc *openContext) libkbfs.Excl {
return libkbfs.Excl(oc.CreateDisposition == dokan.FileCreate)
}
func (d *Dir) create(ctx context.Context, oc *openContext, name string) (f dokan.File, isDir bool, err error) {
d.folder.fs.log.CDebugf(ctx, "Dir Create %s", name)
defer func() { d.folder.reportErr(ctx, libkbfs.WriteMode, err) }()
isExec := false // Windows lacks executable modes.
excl := getExclFromOpenContext(oc)
newNode, _, err := d.folder.fs.config.KBFSOps().CreateFile(
ctx, d.node, name, isExec, excl)
if err != nil {
return nil, false, err
}
child := newFile(d.folder, newNode, name, d.node)
d.folder.lockedAddNode(newNode, child)
return child, false, nil
}
func (d *Dir) mkdir(ctx context.Context, oc *openContext, name string) (f *Dir, isDir bool, err error) {
d.folder.fs.log.CDebugf(ctx, "Dir Mkdir %s", name)
defer func() { d.folder.reportErr(ctx, libkbfs.WriteMode, err) }()
newNode, _, err := d.folder.fs.config.KBFSOps().CreateDir(
ctx, d.node, name)
if err != nil {
return nil, false, err
}
child := newDir(d.folder, newNode, name, d.node)
d.folder.lockedAddNode(newNode, child)
return child, true, nil
}
// FindFiles does readdir for dokan.
func (d *Dir) FindFiles(ctx context.Context, fi *dokan.FileInfo, ignored string, callback func(*dokan.NamedStat) error) (err error) {
d.folder.fs.logEnter(ctx, "Dir FindFiles")
defer func() { d.folder.reportErr(ctx, libkbfs.ReadMode, err) }()
children, err := d.folder.fs.config.KBFSOps().GetDirChildren(ctx, d.node)
if err != nil {
return err
}
empty := true
var ns dokan.NamedStat
for name, de := range children {
empty = false
ns.Name = name
// TODO perhaps resolve symlinks here?
fillStat(&ns.Stat, &de)
err = callback(&ns)
if err != nil {
return err
}
}
if empty {
return dokan.ErrObjectNameNotFound
}
return nil
}
// CanDeleteDirectory - return just nil
// TODO check for permissions here.
func (d *Dir) CanDeleteDirectory(ctx context.Context, fi *dokan.FileInfo) (err error) {
d.folder.fs.logEnterf(ctx, "Dir CanDeleteDirectory %q", d.name)
defer func() { d.folder.reportErr(ctx, libkbfs.WriteMode, err) }()
children, err := d.folder.fs.config.KBFSOps().GetDirChildren(ctx, d.node)
if err != nil {
return errToDokan(err)
}
if len(children) > 0 {
return dokan.ErrDirectoryNotEmpty
}
return nil
}
// Cleanup - forget references, perform deletions etc.
// If Cleanup is called with non-nil FileInfo that has IsDeleteOnClose()
// no libdokan locks should be held prior to the call.
func (d *Dir) Cleanup(ctx context.Context, fi *dokan.FileInfo) {
var err error
if fi != nil {
d.folder.fs.logEnterf(ctx, "Dir Cleanup %q delete=%v", d.name,
fi.IsDeleteOnClose())
} else {
d.folder.fs.logEnterf(ctx, "Dir Cleanup %q", d.name)
}
defer func() { d.folder.reportErr(ctx, libkbfs.WriteMode, err) }()
if fi != nil && fi.IsDeleteOnClose() && d.parent != nil {
// renameAndDeletionLock should be the first lock to be grabbed in libdokan.
d.folder.fs.renameAndDeletionLock.Lock()
defer d.folder.fs.renameAndDeletionLock.Unlock()
d.folder.fs.log.CDebugf(ctx, "Removing (Delete) dir in cleanup %s", d.name)
err = d.folder.fs.config.KBFSOps().RemoveDir(ctx, d.parent, d.name)
}
if d.refcount.Decrease() {
d.folder.forgetNode(ctx, d.node)
}
}
func resolveSymlinkPath(ctx context.Context, origPath []string, targetPath string) ([]string, error) {
pathComponents := make([]string, len(origPath), len(origPath)+1)
copy(pathComponents, origPath)
for _, p := range strings.FieldsFunc(targetPath, isPathSeparator) {
switch p {
case ".":
case "..":
if len(pathComponents) == 0 {
return nil, dokan.ErrNotSupported
}
pathComponents = pathComponents[:len(pathComponents)-1]
default:
pathComponents = append(pathComponents, p)
}
}
return pathComponents, nil
}
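// Illustrative note (not part of the original file): with origPath of
// {"a", "b"} and targetPath "../c/d", resolveSymlinkPath above is expected to
// return {"a", "c", "d"}: ".." pops one component, "." entries are dropped,
// and both '/' and '\' are treated as separators.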
func resolveSymlinkIsDir(ctx context.Context, oc *openContext, rootDir *Dir, origPath []string, targetPath string) (bool, error) {
dst, err := resolveSymlinkPath(ctx, origPath, targetPath)
if err != nil {
return false, err
}
obj, isDir, err := rootDir.open(ctx, oc, dst)
if err == nil {
obj.Cleanup(ctx, nil)
}
return isDir, err
}
func isPathSeparator(r rune) bool {
return r == '/' || r == '\\'
}
func asDir(ctx context.Context, f dokan.File) *Dir {
switch x := f.(type) {
case *Dir:
return x
case *TLF:
d, _, _ := x.loadDirHelper(ctx, "asDir", libkbfs.WriteMode, false)
return d
}
return nil
}
| 1 | 13,747 | I think we need to clear this out on logout, and update it on login, right? | keybase-kbfs | go |
@@ -43,6 +43,8 @@ gboolean
ot_util_filename_validate (const char *name,
GError **error)
{
+ if (name == NULL)
+ return glnx_throw (error, "Invalid NULL filename");
if (strcmp (name, ".") == 0)
return glnx_throw (error, "Invalid self-referential filename '.'");
if (strcmp (name, "..") == 0) | 1 | /*
* Copyright (C) 2011 Colin Walters <[email protected]>
*
* SPDX-License-Identifier: LGPL-2.0+
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Author: Colin Walters <[email protected]>
*/
#include "config.h"
#include "otutil.h"
#include <gio/gio.h>
#include <glib/gstdio.h>
#include <gio/gunixoutputstream.h>
#include <string.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
/* Ensure that a pathname component @name does not contain the special Unix
* entries `.` or `..`, and does not contain `/`.
*/
gboolean
ot_util_filename_validate (const char *name,
GError **error)
{
if (strcmp (name, ".") == 0)
return glnx_throw (error, "Invalid self-referential filename '.'");
if (strcmp (name, "..") == 0)
return glnx_throw (error, "Invalid path uplink filename '..'");
if (strchr (name, '/') != NULL)
return glnx_throw (error, "Invalid / in filename %s", name);
if (!g_utf8_validate (name, -1, NULL))
return glnx_throw (error, "Invalid UTF-8 in filename %s", name);
return TRUE;
}
static GPtrArray *
ot_split_string_ptrarray (const char *str,
char c)
{
GPtrArray *ret = g_ptr_array_new_with_free_func (g_free);
const char *p;
do {
p = strchr (str, '/');
if (!p)
{
g_ptr_array_add (ret, g_strdup (str));
str = NULL;
}
else
{
g_ptr_array_add (ret, g_strndup (str, p - str));
str = p + 1;
}
} while (str && *str);
return ret;
}
/* Given a pathname @path, split it into individual entries in @out_components,
* validating that it does not have backreferences (`..`) etc.
*/
gboolean
ot_util_path_split_validate (const char *path,
GPtrArray **out_components,
GError **error)
{
if (strlen (path) > PATH_MAX)
return glnx_throw (error, "Path '%s' is too long", path);
g_autoptr(GPtrArray) ret_components = ot_split_string_ptrarray (path, '/');
/* Canonicalize by removing '.' and '', throw an error on .. */
for (int i = ret_components->len-1; i >= 0; i--)
{
const char *name = ret_components->pdata[i];
if (strcmp (name, "..") == 0)
return glnx_throw (error, "Invalid uplink '..' in path %s", path);
if (strcmp (name, ".") == 0 || name[0] == '\0')
g_ptr_array_remove_index (ret_components, i);
}
ot_transfer_out_value(out_components, &ret_components);
return TRUE;
}
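/* Illustrative example (not part of the original source): splitting
 * "foo//bar/./baz" with ot_util_path_split_validate() is expected to yield
 * the components {"foo", "bar", "baz"}, since empty and "." entries are
 * dropped, while any path containing ".." is rejected with an error.
 */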
| 1 | 16,766 | I think normally we'd make this a precondition (using e.g. `g_return_val_if_fail`), but meh, this works too! | ostreedev-ostree | c |
@@ -34,5 +34,5 @@ public final class Const {
public static final String SRC_MICROSERVICE = "x-cse-src-microservice";
- public static final String DEST_MICROSERVICE = "x-cse-dest-microservice";
+ public static final String TARGET_MICROSERVICE = "x-cse-target-microservice";
} | 1 | /*
* Copyright 2017 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.core;
public final class Const {
private Const() {
}
public static final String CSE_CONTEXT = "x-cse-context";
public static final String RESTFUL = "rest";
public static final String ANY_TRANSPORT = "";
public static final String VERSION_RULE_LATEST = "latest";
public static final String DEFAULT_VERSION_RULE = VERSION_RULE_LATEST;
public static final String PRODUCER_OPERATION = "producer-operation";
public static final String SRC_MICROSERVICE = "x-cse-src-microservice";
public static final String DEST_MICROSERVICE = "x-cse-dest-microservice";
}
| 1 | 6,537 | Is this header used by other outside service? | apache-servicecomb-java-chassis | java |
@@ -89,10 +89,3 @@ class TestZipFolder(BZTestCase):
result_tree = set(filename[len(destination):] for filename in get_files_recursive(destination))
original_tree = set(filename[len(source):] for filename in get_files_recursive(source))
self.assertEqual(result_tree, original_tree)
-
- def test_no_work_prov(self):
- obj = Unpacker()
- obj.engine = EngineEmul()
- obj.engine.config[Provisioning.PROV] = 'cloud'
- obj.parameters.merge({Unpacker.FILES: ['notexists.zip']})
- obj.prepare() | 1 | import json
import os
import zipfile
from bzt.engine import Service, Provisioning
from bzt.modules.blazemeter import CloudProvisioning, BlazeMeterClientEmul
from bzt.modules.services import Unpacker
from bzt.utils import get_files_recursive
from tests import BZTestCase, __dir__
from tests.mocks import EngineEmul
class TestZipFolder(BZTestCase):
def test_pack_and_send_to_blazemeter(self):
obj = CloudProvisioning()
obj.engine = EngineEmul()
obj.engine.config.merge({
"execution": {
"executor": "selenium",
"concurrency": 5500,
"locations": {
"us-east-1": 1,
"us-west": 2},
"scenario": {
"script": __dir__() + "/../selenium/java_package"}},
"modules": {
"selenium": "bzt.modules.selenium.SeleniumExecutor",
"cloud": "bzt.modules.blazemeter.CloudProvisioning"},
"provisioning": "cloud"
})
obj.parameters = obj.engine.config['execution']
obj.settings["token"] = "FakeToken"
obj.client = client = BlazeMeterClientEmul(obj.log)
client.results.append({"result": []}) # collections
client.results.append({"result": []}) # tests
client.results.append(self.__get_user_info()) # user
client.results.append({"result": {"id": id(client)}}) # create test
client.results.append({"files": []}) # create test
client.results.append({}) # upload files
client.results.append({"result": {"id": id(obj)}}) # start
client.results.append({"result": {"id": id(obj)}}) # get master
client.results.append({"result": []}) # get master sessions
client.results.append({}) # terminate
obj.prepare()
unpack_cfgs = obj.engine.config.get(Service.SERV)
self.assertEqual(len(unpack_cfgs), 1)
self.assertEqual(unpack_cfgs[0]['module'], Unpacker.UNPACK)
self.assertEqual(unpack_cfgs[0][Unpacker.FILES], ['java_package.zip'])
self.assertTrue(zipfile.is_zipfile(obj.engine.artifacts_dir + '/java_package.zip'))
@staticmethod
def __get_user_info():
with open(__dir__() + "/../json/blazemeter-api-user.json") as fhd:
return json.loads(fhd.read())
def test_receive_and_unpack_on_worker(self):
obj = Unpacker()
obj.engine = EngineEmul()
obj.engine.config.merge({
"execution": {
"executor": "selenium",
"concurrency": 5500,
"scenario": {
"script": "java_package.zip"}},
"modules": {
"selenium": "bzt.modules.selenium.SeleniumExecutor",
"cloud": "bzt.modules.blazemeter.CloudProvisioning"},
"provisioning": "local"
})
obj.engine.file_search_paths = [obj.engine.artifacts_dir]
obj.parameters["files"] = ["java_package.zip"]
# create archive and put it in artifact dir
source = __dir__() + "/../selenium/java_package"
zip_name = obj.engine.create_artifact('java_package', '.zip')
with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_STORED) as zip_file:
for filename in get_files_recursive(source):
zip_file.write(filename, filename[len(os.path.dirname(source)):])
obj.prepare()
# check unpacked tree
destination = obj.engine.artifacts_dir + '/java_package'
result_tree = set(filename[len(destination):] for filename in get_files_recursive(destination))
original_tree = set(filename[len(source):] for filename in get_files_recursive(source))
self.assertEqual(result_tree, original_tree)
def test_no_work_prov(self):
obj = Unpacker()
obj.engine = EngineEmul()
obj.engine.config[Provisioning.PROV] = 'cloud'
obj.parameters.merge({Unpacker.FILES: ['notexists.zip']})
obj.prepare()
| 1 | 14,000 | that's fine by me to have simple test for provisioning check | Blazemeter-taurus | py |
@@ -83,7 +83,7 @@ public class TriggerBasedScheduleLoader implements ScheduleLoader {
ConditionChecker checker =
new BasicTimeChecker("BasicTimeChecker_1", s.getFirstSchedTime(),
s.getTimezone(), s.isRecurring(), s.skipPastOccurrences(),
- s.getPeriod());
+ s.getPeriod(), s.getCronExpression());
checkers.put(checker.getId(), checker);
String expr = checker.getId() + ".eval()";
Condition cond = new Condition(checkers, expr); | 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.scheduler;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import azkaban.trigger.Condition;
import azkaban.trigger.ConditionChecker;
import azkaban.trigger.Trigger;
import azkaban.trigger.TriggerAction;
import azkaban.trigger.TriggerManager;
import azkaban.trigger.TriggerManagerAdapter;
import azkaban.trigger.TriggerManagerException;
import azkaban.trigger.builtin.BasicTimeChecker;
import azkaban.trigger.builtin.ExecuteFlowAction;
public class TriggerBasedScheduleLoader implements ScheduleLoader {
private static Logger logger = Logger
.getLogger(TriggerBasedScheduleLoader.class);
private TriggerManagerAdapter triggerManager;
private String triggerSource;
private long lastUpdateTime = -1;
public TriggerBasedScheduleLoader(TriggerManager triggerManager,
String triggerSource) {
this.triggerManager = triggerManager;
this.triggerSource = triggerSource;
}
private Trigger scheduleToTrigger(Schedule s) {
Condition triggerCondition = createTriggerCondition(s);
Condition expireCondition = createExpireCondition(s);
List<TriggerAction> actions = createActions(s);
Trigger t =
new Trigger(s.getScheduleId(), s.getLastModifyTime(),
s.getSubmitTime(), s.getSubmitUser(), triggerSource,
triggerCondition, expireCondition, actions);
if (s.isRecurring()) {
t.setResetOnTrigger(true);
} else {
t.setResetOnTrigger(false);
}
return t;
}
private List<TriggerAction> createActions(Schedule s) {
List<TriggerAction> actions = new ArrayList<TriggerAction>();
ExecuteFlowAction executeAct =
new ExecuteFlowAction("executeFlowAction", s.getProjectId(),
s.getProjectName(), s.getFlowName(), s.getSubmitUser(),
s.getExecutionOptions(), s.getSlaOptions());
actions.add(executeAct);
return actions;
}
private Condition createTriggerCondition(Schedule s) {
Map<String, ConditionChecker> checkers =
new HashMap<String, ConditionChecker>();
ConditionChecker checker =
new BasicTimeChecker("BasicTimeChecker_1", s.getFirstSchedTime(),
s.getTimezone(), s.isRecurring(), s.skipPastOccurrences(),
s.getPeriod());
checkers.put(checker.getId(), checker);
String expr = checker.getId() + ".eval()";
Condition cond = new Condition(checkers, expr);
return cond;
}
// if failed to trigger, auto expire?
private Condition createExpireCondition(Schedule s) {
Map<String, ConditionChecker> checkers =
new HashMap<String, ConditionChecker>();
ConditionChecker checker =
new BasicTimeChecker("BasicTimeChecker_2", s.getFirstSchedTime(),
s.getTimezone(), s.isRecurring(), s.skipPastOccurrences(),
s.getPeriod());
checkers.put(checker.getId(), checker);
String expr = checker.getId() + ".eval()";
Condition cond = new Condition(checkers, expr);
return cond;
}
@Override
public void insertSchedule(Schedule s) throws ScheduleManagerException {
Trigger t = scheduleToTrigger(s);
try {
triggerManager.insertTrigger(t, t.getSubmitUser());
s.setScheduleId(t.getTriggerId());
} catch (TriggerManagerException e) {
throw new ScheduleManagerException("Failed to insert new schedule!", e);
}
}
@Override
public void updateSchedule(Schedule s) throws ScheduleManagerException {
Trigger t = scheduleToTrigger(s);
try {
triggerManager.updateTrigger(t, t.getSubmitUser());
} catch (TriggerManagerException e) {
throw new ScheduleManagerException("Failed to update schedule!", e);
}
}
// TODO may need to add logic to filter out skip runs
@Override
public synchronized List<Schedule> loadSchedules()
throws ScheduleManagerException {
List<Trigger> triggers = triggerManager.getTriggers(triggerSource);
List<Schedule> schedules = new ArrayList<Schedule>();
for (Trigger t : triggers) {
lastUpdateTime = Math.max(lastUpdateTime, t.getLastModifyTime());
Schedule s = triggerToSchedule(t);
schedules.add(s);
System.out.println("loaded schedule for " + s.getProjectId()
+ s.getProjectName());
}
return schedules;
}
private Schedule triggerToSchedule(Trigger t) throws ScheduleManagerException {
Condition triggerCond = t.getTriggerCondition();
Map<String, ConditionChecker> checkers = triggerCond.getCheckers();
BasicTimeChecker ck = null;
for (ConditionChecker checker : checkers.values()) {
if (checker.getType().equals(BasicTimeChecker.type)) {
ck = (BasicTimeChecker) checker;
break;
}
}
List<TriggerAction> actions = t.getActions();
ExecuteFlowAction act = null;
for (TriggerAction action : actions) {
if (action.getType().equals(ExecuteFlowAction.type)) {
act = (ExecuteFlowAction) action;
break;
}
}
if (ck != null && act != null) {
Schedule s =
new Schedule(t.getTriggerId(), act.getProjectId(),
act.getProjectName(), act.getFlowName(),
t.getStatus().toString(), ck.getFirstCheckTime(),
ck.getTimeZone(), ck.getPeriod(), t.getLastModifyTime(),
ck.getNextCheckTime(), t.getSubmitTime(), t.getSubmitUser(),
act.getExecutionOptions(), act.getSlaOptions());
return s;
} else {
logger.error("Failed to parse schedule from trigger!");
throw new ScheduleManagerException(
"Failed to parse schedule from trigger!");
}
}
@Override
public void removeSchedule(Schedule s) throws ScheduleManagerException {
try {
triggerManager.removeTrigger(s.getScheduleId(), s.getSubmitUser());
} catch (TriggerManagerException e) {
throw new ScheduleManagerException(e.getMessage());
}
}
@Override
public void updateNextExecTime(Schedule s) throws ScheduleManagerException {
}
@Override
public synchronized List<Schedule> loadUpdatedSchedules()
throws ScheduleManagerException {
List<Trigger> triggers;
try {
triggers =
triggerManager.getTriggerUpdates(triggerSource, lastUpdateTime);
} catch (TriggerManagerException e) {
e.printStackTrace();
throw new ScheduleManagerException(e);
}
List<Schedule> schedules = new ArrayList<Schedule>();
for (Trigger t : triggers) {
lastUpdateTime = Math.max(lastUpdateTime, t.getLastModifyTime());
Schedule s = triggerToSchedule(t);
schedules.add(s);
System.out.println("loaded schedule for " + s.getProjectId()
+ s.getProjectName());
}
return schedules;
}
}
| 1 | 11,370 | These two methods seem to be identical except the names. Any idea why two methods are needed? | azkaban-azkaban | java |
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+
+import sys
+import os
+sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", ".."))
+from influxdb import InfluxDBClient
+import config
+
+try:
+ i = InfluxDBClient(host=config.INFLUX_HOST, port=config.INFLUX_PORT, database=config.INFLUX_TEST_DB)
+ i.create_database(config.INFLUX_TEST_DB)
+except Exception as e:
+ print("Creating influx DB failed: ", e) | 1 | 1 | 14,019 | I don't see much difference between this and `create_db` module. How about having a function with a `database` argument? | metabrainz-listenbrainz-server | py |
|
@@ -183,7 +183,7 @@ class AnalyticsDashboardWidget extends Component {
</div>
{ /* Data issue: on error display a notification. On missing data: display a CTA. */ }
{ ! receivingData && (
- error ? getDataErrorComponent( _x( 'Analytics', 'Service name', 'google-site-kit' ), error, true, true, true, errorObj ) : getNoDataComponent( _x( 'Analytics', 'Service name', 'google-site-kit' ), true, true, true )
+ error ? getDataErrorComponent( 'analytics', _x( 'Analytics', 'Service name', 'google-site-kit' ), error, true, true, true, errorObj ) : getNoDataComponent( _x( 'Analytics', 'Service name', 'google-site-kit' ), true, true, true )
) }
<div className={ classnames(
'mdc-layout-grid__cell', | 1 | /**
* AnalyticsDashboardWidget component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import classnames from 'classnames';
/**
* WordPress dependencies
*/
import { Component, Fragment } from '@wordpress/element';
import { __, _x, sprintf } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import Data from 'googlesitekit-data';
import Header from '../../../../components/header';
import AnalyticsDashboardWidgetSiteStats from './AnalyticsDashboardWidgetSiteStats';
import AnalyticsDashboardWidgetTopPagesTable from './AnalyticsDashboardWidgetTopPagesTable';
import AnalyticsDashboardWidgetOverview from './AnalyticsDashboardWidgetOverview';
import LegacyAnalyticsDashboardWidgetTopAcquisitionSources from './LegacyAnalyticsDashboardWidgetTopAcquisitionSources';
import Layout from '../../../../components/layout/layout';
import PageHeader from '../../../../components/page-header';
import LegacyDashboardAcquisitionPieChart from './LegacyDashboardAcquisitionPieChart';
import Alert from '../../../../components/alert';
import ProgressBar from '../../../../components/progress-bar';
import getNoDataComponent from '../../../../components/notifications/nodata';
import getDataErrorComponent from '../../../../components/notifications/data-error';
import AdSenseDashboardOutro from '../../../adsense/components/dashboard/AdSenseDashboardOutro';
import { isAdsenseConnectedAnalytics } from '../../../adsense/util';
import { getCurrentDateRange } from '../../../../util/date-range';
import HelpLink from '../../../../components/help-link';
import { STORE_NAME as CORE_USER } from '../../../../googlesitekit/datastore/user/constants';
const { withSelect } = Data;
class AnalyticsDashboardWidget extends Component {
constructor( props ) {
super( props );
this.state = {
selectedStats: [ 0 ],
receivingData: true,
error: false,
loading: true,
isAdSenseConnected: true,
};
this.handleStatSelection = this.handleStatSelection.bind( this );
this.buildSeries = this.buildSeries.bind( this );
this.handleDataError = this.handleDataError.bind( this );
this.handleDataSuccess = this.handleDataSuccess.bind( this );
}
componentDidMount() {
this.isAdSenseConnected();
}
async isAdSenseConnected() {
const adsenseConnect = await isAdsenseConnectedAnalytics();
if ( adsenseConnect ) {
this.setState( {
isAdSenseConnected: true,
} );
} else {
this.setState( {
isAdSenseConnected: false,
} );
}
}
handleStatSelection( stat ) {
this.setState( { selectedStats: [ stat ] } );
}
/**
* Handle data errors from the contained Analytics component(s).
*
* Currently handled in the AnalyticsDashboardWidget component.
*
* If this component's API data calls returns an error, the error message is passed to this callback, resulting in the display of an error Notification.
*
* If the component detects no data - in this case all 0s - the callback is called without an error message,
* resulting in the display of a CTA.
*
* @param {string} error A potential error string.
* @param {Object} errorObj Full error object.
*/
handleDataError( error, errorObj ) {
this.setState( {
receivingData: false,
loading: false,
error,
errorObj,
} );
}
/**
* Loading is set to false until data starts to resolve.
*/
handleDataSuccess() {
this.setState( {
receivingData: true,
loading: false,
} );
}
buildSeries() {
const { selectedStats } = this.state;
const colorMap = {
0: '#4285f4',
1: '#27bcd4',
2: '#1b9688',
3: '#673ab7',
};
return {
0: {
color: colorMap[ selectedStats ],
targetAxisIndex: 0,
},
1: {
color: colorMap[ selectedStats ],
targetAxisIndex: 0,
lineDashStyle: [ 3, 3 ],
lineWidth: 1,
},
};
}
render() {
const {
selectedStats,
error,
errorObj,
receivingData,
loading,
isAdSenseConnected,
} = this.state;
const {
dateRange,
} = this.props;
const series = this.buildSeries();
const vAxes = null;
// Hide Analytics data display when we don't have data.
const wrapperClass = ! loading && receivingData ? '' : 'googlesitekit-nodata';
const currentDateRange = getCurrentDateRange( dateRange );
return (
<Fragment>
<Header />
<Alert module="analytics" />
<div className="googlesitekit-module-page googlesitekit-module-page--analytics">
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
<PageHeader title={ _x( 'Analytics', 'Service name', 'google-site-kit' ) } icon iconWidth="24" iconHeight="26" iconID="analytics" status="connected" statusText={ __( 'Analytics is connected', 'google-site-kit' ) } />
{ loading && <ProgressBar /> }
</div>
{ /* Data issue: on error display a notification. On missing data: display a CTA. */ }
{ ! receivingData && (
error ? getDataErrorComponent( _x( 'Analytics', 'Service name', 'google-site-kit' ), error, true, true, true, errorObj ) : getNoDataComponent( _x( 'Analytics', 'Service name', 'google-site-kit' ), true, true, true )
) }
<div className={ classnames(
'mdc-layout-grid__cell',
'mdc-layout-grid__cell--span-12',
wrapperClass
) }>
<Layout
header
/* translators: %s: date range */
title={ sprintf( __( 'Audience overview for the last %s', 'google-site-kit' ), currentDateRange ) }
headerCtaLabel={ __( 'See full stats in Analytics', 'google-site-kit' ) }
headerCtaLink="http://analytics.google.com"
>
<AnalyticsDashboardWidgetOverview
selectedStats={ selectedStats }
handleStatSelection={ this.handleStatSelection }
handleDataError={ this.handleDataError }
handleDataSuccess={ this.handleDataSuccess }
/>
<AnalyticsDashboardWidgetSiteStats
selectedStats={ selectedStats }
series={ series }
vAxes={ vAxes }
dateRangeSlug={ dateRange }
/>
</Layout>
</div>
<div className={ classnames(
'mdc-layout-grid__cell',
'mdc-layout-grid__cell--span-12',
wrapperClass
) }>
<Layout
header
footer
/* translators: %s: date range */
title={ sprintf( __( 'Top content over the last %s', 'google-site-kit' ), currentDateRange ) }
headerCtaLink="https://analytics.google.com"
headerCtaLabel={ __( 'See full stats in Analytics', 'google-site-kit' ) }
footerCtaLabel={ _x( 'Analytics', 'Service name', 'google-site-kit' ) }
footerCtaLink="https://analytics.google.com"
>
<AnalyticsDashboardWidgetTopPagesTable />
</Layout>
</div>
<div className={ classnames(
'mdc-layout-grid__cell',
'mdc-layout-grid__cell--span-12',
wrapperClass
) }>
<Layout
header
footer
/* translators: %s: date range */
title={ sprintf( __( 'Top acquisition channels over the last %s', 'google-site-kit' ), currentDateRange ) }
headerCtaLink="https://analytics.google.com"
headerCtaLabel={ __( 'See full stats in Analytics', 'google-site-kit' ) }
footerCtaLabel={ _x( 'Analytics', 'Service name', 'google-site-kit' ) }
footerCtaLink="https://analytics.google.com"
>
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-4-desktop
mdc-layout-grid__cell--span-8-tablet
mdc-layout-grid__cell--span-4-phone
">
<LegacyDashboardAcquisitionPieChart />
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-8-desktop
mdc-layout-grid__cell--span-8-tablet
mdc-layout-grid__cell--span-4-phone
">
<LegacyAnalyticsDashboardWidgetTopAcquisitionSources />
</div>
</div>
</div>
</Layout>
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
mdc-layout-grid__cell--align-right
">
<HelpLink />
</div>
</div>
</div>
</div>
{ ! loading && ! isAdSenseConnected &&
<AdSenseDashboardOutro />
}
</Fragment>
);
}
}
export default withSelect(
( select ) => ( {
dateRange: select( CORE_USER ).getDateRange(),
} ),
)( AnalyticsDashboardWidget );
| 1 | 31,919 | See above, no need to pass the module name. | google-site-kit-wp | js |
@@ -316,7 +316,7 @@ class LGBMModel(_LGBMModelBase):
group : array-like or None, optional (default=None)
Group data of training data.
eval_set : list or None, optional (default=None)
- A list of (X, y) tuple pairs to use as a validation sets.
+ A list of (X, y) tuple pairs to use as a validation set.
eval_names : list of strings or None, optional (default=None)
Names of eval_set.
eval_sample_weight : list of arrays or None, optional (default=None) | 1 | # coding: utf-8
# pylint: disable = invalid-name, W0105, C0111, C0301
"""Scikit-Learn Wrapper interface for LightGBM."""
from __future__ import absolute_import
import numpy as np
import warnings
from .basic import Dataset, LightGBMError
from .compat import (SKLEARN_INSTALLED, _LGBMClassifierBase,
LGBMNotFittedError, _LGBMLabelEncoder, _LGBMModelBase,
_LGBMRegressorBase, _LGBMCheckXY, _LGBMCheckArray, _LGBMCheckConsistentLength,
_LGBMAssertAllFinite, _LGBMCheckClassificationTargets, _LGBMComputeSampleWeight,
argc_, range_, string_type, DataFrame)
from .engine import train
def _objective_function_wrapper(func):
"""Decorate an objective function
Note: for multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]
and you should group grad and hess in this way as well.
Parameters
----------
func : callable
Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group):
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
The predicted values.
group : array-like
Group/query data, used for ranking task.
Returns
-------
new_func : callable
The new objective function as expected by ``lightgbm.engine.train``.
The signature is ``new_func(preds, dataset)``:
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes]
The predicted values.
dataset : ``dataset``
The training set from which the labels will be extracted using
``dataset.get_label()``.
"""
def inner(preds, dataset):
"""internal function"""
labels = dataset.get_label()
argc = argc_(func)
if argc == 2:
grad, hess = func(labels, preds)
elif argc == 3:
grad, hess = func(labels, preds, dataset.get_group())
else:
raise TypeError("Self-defined objective function should have 2 or 3 arguments, got %d" % argc)
"""weighted for objective"""
weight = dataset.get_weight()
if weight is not None:
"""only one class"""
if len(weight) == len(grad):
grad = np.multiply(grad, weight)
hess = np.multiply(hess, weight)
else:
num_data = len(weight)
num_class = len(grad) // num_data
if num_class * num_data != len(grad):
raise ValueError("Length of grad and hess should equal to num_class * num_data")
for k in range_(num_class):
for i in range_(num_data):
idx = k * num_data + i
grad[idx] *= weight[i]
hess[idx] *= weight[i]
return grad, hess
return inner
def _eval_function_wrapper(func):
"""Decorate an eval function
Note: for multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].
Parameters
----------
func : callable
Expects a callable with following functions:
``func(y_true, y_pred)``,
``func(y_true, y_pred, weight)``
or ``func(y_true, y_pred, weight, group)``
and return (eval_name->str, eval_result->float, is_bigger_better->Bool):
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
The predicted values.
weight : array_like of shape = [n_samples]
The weight of samples.
group : array-like
Group/query data, used for ranking task.
Returns
-------
new_func : callable
The new eval function as expected by ``lightgbm.engine.train``.
The signature is ``new_func(preds, dataset)``:
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes]
The predicted values.
dataset : ``dataset``
The training set from which the labels will be extracted using
``dataset.get_label()``.
"""
def inner(preds, dataset):
"""internal function"""
labels = dataset.get_label()
argc = argc_(func)
if argc == 2:
return func(labels, preds)
elif argc == 3:
return func(labels, preds, dataset.get_weight())
elif argc == 4:
return func(labels, preds, dataset.get_weight(), dataset.get_group())
else:
raise TypeError("Self-defined eval function should have 2, 3 or 4 arguments, got %d" % argc)
return inner
class LGBMModel(_LGBMModelBase):
"""Implementation of the scikit-learn API for LightGBM."""
def __init__(self, boosting_type="gbdt", num_leaves=31, max_depth=-1,
learning_rate=0.1, n_estimators=100,
subsample_for_bin=200000, objective=None, class_weight=None,
min_split_gain=0., min_child_weight=1e-3, min_child_samples=20,
subsample=1., subsample_freq=0, colsample_bytree=1.,
reg_alpha=0., reg_lambda=0., random_state=None,
n_jobs=-1, silent=True, importance_type='split', **kwargs):
"""Construct a gradient boosting model.
Parameters
----------
boosting_type : string, optional (default="gbdt")
'gbdt', traditional Gradient Boosting Decision Tree.
'dart', Dropouts meet Multiple Additive Regression Trees.
'goss', Gradient-based One-Side Sampling.
'rf', Random Forest.
num_leaves : int, optional (default=31)
Maximum tree leaves for base learners.
max_depth : int, optional (default=-1)
Maximum tree depth for base learners, -1 means no limit.
learning_rate : float, optional (default=0.1)
Boosting learning rate.
You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
in training using ``reset_parameter`` callback.
Note, that this will ignore the ``learning_rate`` argument in training.
n_estimators : int, optional (default=100)
Number of boosted trees to fit.
subsample_for_bin : int, optional (default=200000)
Number of samples for constructing bins.
objective : string, callable or None, optional (default=None)
Specify the learning task and the corresponding learning objective or
a custom objective function to be used (see note below).
Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
class_weight : dict, 'balanced' or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
Use this parameter only for multi-class classification task;
for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
The 'balanced' mode uses the values of y to automatically adjust weights
inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
If None, all classes are supposed to have weight one.
Note that these weights will be multiplied with ``sample_weight`` (passed through the fit method)
if ``sample_weight`` is specified.
min_split_gain : float, optional (default=0.)
Minimum loss reduction required to make a further partition on a leaf node of the tree.
min_child_weight : float, optional (default=1e-3)
Minimum sum of instance weight(hessian) needed in a child(leaf).
min_child_samples : int, optional (default=20)
Minimum number of data need in a child(leaf).
subsample : float, optional (default=1.)
Subsample ratio of the training instance.
subsample_freq : int, optional (default=0)
Frequence of subsample, <=0 means no enable.
colsample_bytree : float, optional (default=1.)
Subsample ratio of columns when constructing each tree.
reg_alpha : float, optional (default=0.)
L1 regularization term on weights.
reg_lambda : float, optional (default=0.)
L2 regularization term on weights.
random_state : int or None, optional (default=None)
Random number seed.
If None, default seeds in C++ code will be used.
n_jobs : int, optional (default=-1)
Number of parallel threads.
silent : bool, optional (default=True)
Whether to print messages while running boosting.
importance_type : string, optional (default='split')
The type of feature importance to be filled into ``feature_importances_``.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
**kwargs : other parameters
Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.
Note
----
\\*\\*kwargs is not supported in sklearn, it may cause unexpected issues.
Attributes
----------
n_features_ : int
The number of features of fitted model.
classes_ : array of shape = [n_classes]
The class label array (only for classification problem).
n_classes_ : int
The number of classes (only for classification problem).
best_score_ : dict or None
The best score of fitted model.
best_iteration_ : int or None
The best iteration of fitted model if ``early_stopping_rounds`` has been specified.
objective_ : string or callable
The concrete objective used while fitting this model.
booster_ : Booster
The underlying Booster of this model.
evals_result_ : dict or None
The evaluation results if ``early_stopping_rounds`` has been specified.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
Note
----
A custom objective function can be provided for the ``objective``
parameter. In this case, it should have the signature
``objective(y_true, y_pred) -> grad, hess`` or
``objective(y_true, y_pred, group) -> grad, hess``:
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
group : array-like
Group/query data, used for ranking task.
grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the gradient for each sample point.
hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the second derivative for each sample point.
For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]
and you should group grad and hess in this way as well.
"""
if not SKLEARN_INSTALLED:
raise LightGBMError('Scikit-learn is required for this module')
self.boosting_type = boosting_type
self.objective = objective
self.num_leaves = num_leaves
self.max_depth = max_depth
self.learning_rate = learning_rate
self.n_estimators = n_estimators
self.subsample_for_bin = subsample_for_bin
self.min_split_gain = min_split_gain
self.min_child_weight = min_child_weight
self.min_child_samples = min_child_samples
self.subsample = subsample
self.subsample_freq = subsample_freq
self.colsample_bytree = colsample_bytree
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.random_state = random_state
self.n_jobs = n_jobs
self.silent = silent
self.importance_type = importance_type
self._Booster = None
self._evals_result = None
self._best_score = None
self._best_iteration = None
self._other_params = {}
self._objective = objective
self.class_weight = class_weight
self._n_features = None
self._classes = None
self._n_classes = None
self.set_params(**kwargs)
def get_params(self, deep=True):
params = super(LGBMModel, self).get_params(deep=deep)
params.update(self._other_params)
return params
# minor change to support `**kwargs`
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
if hasattr(self, '_' + key):
setattr(self, '_' + key, value)
self._other_params[key] = value
return self
def fit(self, X, y,
sample_weight=None, init_score=None, group=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_class_weight=None, eval_init_score=None, eval_group=None,
eval_metric=None, early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto', callbacks=None):
"""Build a gradient boosting model from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input feature matrix.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in regression).
sample_weight : array-like of shape = [n_samples] or None, optional (default=None)
Weights of training data.
init_score : array-like of shape = [n_samples] or None, optional (default=None)
Init score of training data.
group : array-like or None, optional (default=None)
Group data of training data.
eval_set : list or None, optional (default=None)
A list of (X, y) tuple pairs to use as a validation sets.
eval_names : list of strings or None, optional (default=None)
Names of eval_set.
eval_sample_weight : list of arrays or None, optional (default=None)
Weights of eval data.
eval_class_weight : list or None, optional (default=None)
Class weights of eval data.
eval_init_score : list of arrays or None, optional (default=None)
Init score of eval data.
eval_group : list of arrays or None, optional (default=None)
Group data of eval data.
eval_metric : string, list of strings, callable or None, optional (default=None)
If string, it should be a built-in evaluation metric to use.
If callable, it should be a custom evaluation metric, see note below for more details.
In either case, the ``metric`` from the model parameters will be evaluated and used as well.
Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
early_stopping_rounds : int or None, optional (default=None)
Activates early stopping. The model will train until the validation score stops improving.
Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
to continue training.
Requires at least one validation data and one metric.
If there's more than one, will check all of them. But the training data is ignored anyway.
verbose : bool, optional (default=True)
If True and an evaluation set is used, writes the evaluation progress.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider to use consecutive integers started from zero.
All negative values in categorical features will be treated as missing values.
callbacks : list of callback functions or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
Returns
-------
self : object
Returns self.
Note
----
Custom eval function expects a callable with following functions:
``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
``func(y_true, y_pred, weight, group)``.
Returns (eval_name, eval_result, is_bigger_better) or
list of (eval_name, eval_result, is_bigger_better)
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
The predicted values.
weight : array-like of shape = [n_samples]
The weight of samples.
group : array-like
Group/query data, used for ranking task.
eval_name : string
The name of evaluation.
eval_result : float
The eval result.
is_bigger_better : bool
Is eval result bigger better, e.g. AUC is bigger_better.
For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].
"""
if self._objective is None:
if isinstance(self, LGBMRegressor):
self._objective = "regression"
elif isinstance(self, LGBMClassifier):
self._objective = "binary"
elif isinstance(self, LGBMRanker):
self._objective = "lambdarank"
else:
raise ValueError("Unknown LGBMModel type.")
if callable(self._objective):
self._fobj = _objective_function_wrapper(self._objective)
else:
self._fobj = None
evals_result = {}
params = self.get_params()
# user can set verbose with kwargs, it has higher priority
if not any(verbose_alias in params for verbose_alias in ('verbose', 'verbosity')) and self.silent:
params['verbose'] = -1
params.pop('silent', None)
params.pop('importance_type', None)
params.pop('n_estimators', None)
params.pop('class_weight', None)
if self._n_classes is not None and self._n_classes > 2:
params['num_class'] = self._n_classes
if hasattr(self, '_eval_at'):
params['eval_at'] = self._eval_at
params['objective'] = self._objective
if self._fobj:
params['objective'] = 'None' # objective = nullptr for unknown objective
if callable(eval_metric):
feval = _eval_function_wrapper(eval_metric)
else:
feval = None
# register default metric for consistency with callable eval_metric case
original_metric = self._objective if isinstance(self._objective, string_type) else None
if original_metric is None:
# try to deduce from class instance
if isinstance(self, LGBMRegressor):
original_metric = "l2"
elif isinstance(self, LGBMClassifier):
original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
elif isinstance(self, LGBMRanker):
original_metric = "ndcg"
# overwrite default metric by explicitly set metric
for metric_alias in ['metric', 'metrics', 'metric_types']:
if metric_alias in params:
original_metric = params.pop(metric_alias)
# concatenate metric from params (or default if not provided in params) and eval_metric
original_metric = [original_metric] if isinstance(original_metric, (string_type, type(None))) else original_metric
eval_metric = [eval_metric] if isinstance(eval_metric, (string_type, type(None))) else eval_metric
params['metric'] = set(original_metric + eval_metric)
if not isinstance(X, DataFrame):
X, y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
_LGBMCheckConsistentLength(X, y, sample_weight)
if self.class_weight is not None:
class_sample_weight = _LGBMComputeSampleWeight(self.class_weight, y)
if sample_weight is None or len(sample_weight) == 0:
sample_weight = class_sample_weight
else:
sample_weight = np.multiply(sample_weight, class_sample_weight)
self._n_features = X.shape[1]
def _construct_dataset(X, y, sample_weight, init_score, group, params):
ret = Dataset(X, label=y, weight=sample_weight, group=group, params=params)
return ret.set_init_score(init_score)
train_set = _construct_dataset(X, y, sample_weight, init_score, group, params)
valid_sets = []
if eval_set is not None:
def _get_meta_data(collection, i):
if collection is None:
return None
elif isinstance(collection, list):
return collection[i] if len(collection) > i else None
elif isinstance(collection, dict):
return collection.get(i, None)
else:
raise TypeError('eval_sample_weight, eval_class_weight, eval_init_score, and eval_group '
'should be dict or list')
if isinstance(eval_set, tuple):
eval_set = [eval_set]
for i, valid_data in enumerate(eval_set):
# reduce cost for prediction training data
if valid_data[0] is X and valid_data[1] is y:
valid_set = train_set
else:
valid_weight = _get_meta_data(eval_sample_weight, i)
if _get_meta_data(eval_class_weight, i) is not None:
valid_class_sample_weight = _LGBMComputeSampleWeight(_get_meta_data(eval_class_weight, i),
valid_data[1])
if valid_weight is None or len(valid_weight) == 0:
valid_weight = valid_class_sample_weight
else:
valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
valid_init_score = _get_meta_data(eval_init_score, i)
valid_group = _get_meta_data(eval_group, i)
valid_set = _construct_dataset(valid_data[0], valid_data[1],
valid_weight, valid_init_score, valid_group, params)
valid_sets.append(valid_set)
self._Booster = train(params, train_set,
self.n_estimators, valid_sets=valid_sets, valid_names=eval_names,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, fobj=self._fobj, feval=feval,
verbose_eval=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks)
if evals_result:
self._evals_result = evals_result
if early_stopping_rounds is not None:
self._best_iteration = self._Booster.best_iteration
self._best_score = self._Booster.best_score
# free dataset
self.booster_.free_dataset()
del train_set, valid_sets
return self
def predict(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""Return the predicted value for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
num_iteration : int or None, optional (default=None)
Limit number of iterations in the prediction.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
Note
----
If you want to get more explanation for your model's predictions using SHAP values
like SHAP interaction values,
you can install shap package (https://github.com/slundberg/shap).
**kwargs : other parameters for the prediction
Returns
-------
predicted_result : array-like of shape = [n_samples] or shape = [n_samples, n_classes]
The predicted values.
X_leaves : array-like of shape = [n_samples, n_trees] or shape [n_samples, n_trees * n_classes]
If ``pred_leaf=True``, the predicted leaf every tree for each sample.
X_SHAP_values : array-like of shape = [n_samples, n_features + 1] or shape [n_samples, (n_features + 1) * n_classes]
If ``pred_contrib=True``, the each feature contributions for each sample.
"""
if self._n_features is None:
raise LGBMNotFittedError("Estimator not fitted, call `fit` before exploiting the model.")
if not isinstance(X, DataFrame):
X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
n_features = X.shape[1]
if self._n_features != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features_ is %s and "
"input n_features is %s "
% (self._n_features, n_features))
return self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration,
pred_leaf=pred_leaf, pred_contrib=pred_contrib, **kwargs)
@property
def n_features_(self):
"""Get the number of features of fitted model."""
if self._n_features is None:
raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
return self._n_features
@property
def best_score_(self):
"""Get the best score of fitted model."""
if self._n_features is None:
raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
return self._best_score
@property
def best_iteration_(self):
"""Get the best iteration of fitted model."""
if self._n_features is None:
raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping_rounds beforehand.')
return self._best_iteration
@property
def objective_(self):
"""Get the concrete objective used while fitting this model."""
if self._n_features is None:
raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
return self._objective
@property
def booster_(self):
"""Get the underlying lightgbm Booster of this model."""
if self._Booster is None:
raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
return self._Booster
@property
def evals_result_(self):
"""Get the evaluation results."""
if self._n_features is None:
raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
return self._evals_result
@property
def feature_importances_(self):
"""Get feature importances.
Note
----
Feature importance in sklearn interface used to normalize to 1,
it's deprecated after 2.0.4 and is the same as Booster.feature_importance() now.
``importance_type`` attribute is passed to the function
to configure the type of importance values to be extracted.
"""
if self._n_features is None:
raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
return self.booster_.feature_importance(importance_type=self.importance_type)
class LGBMRegressor(LGBMModel, _LGBMRegressorBase):
"""LightGBM regressor."""
def fit(self, X, y,
sample_weight=None, init_score=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_init_score=None, eval_metric=None, early_stopping_rounds=None,
verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None):
super(LGBMRegressor, self).fit(X, y, sample_weight=sample_weight,
init_score=init_score, eval_set=eval_set,
eval_names=eval_names,
eval_sample_weight=eval_sample_weight,
eval_init_score=eval_init_score,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks)
return self
_base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]
+ _base_doc[_base_doc.find('eval_init_score :'):])
class LGBMClassifier(LGBMModel, _LGBMClassifierBase):
"""LightGBM classifier."""
def fit(self, X, y,
sample_weight=None, init_score=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_class_weight=None, eval_init_score=None, eval_metric=None,
early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto', callbacks=None):
_LGBMAssertAllFinite(y)
_LGBMCheckClassificationTargets(y)
self._le = _LGBMLabelEncoder().fit(y)
_y = self._le.transform(y)
self._classes = self._le.classes_
self._n_classes = len(self._classes)
if self._n_classes > 2:
# Switch to using a multiclass objective in the underlying LGBM instance
ova_aliases = ("multiclassova", "multiclass_ova", "ova", "ovr")
if self._objective not in ova_aliases and not callable(self._objective):
self._objective = "multiclass"
if eval_metric in ('logloss', 'binary_logloss'):
eval_metric = "multi_logloss"
elif eval_metric in ('error', 'binary_error'):
eval_metric = "multi_error"
else:
if eval_metric in ('logloss', 'multi_logloss'):
eval_metric = 'binary_logloss'
elif eval_metric in ('error', 'multi_error'):
eval_metric = 'binary_error'
if eval_set is not None:
if isinstance(eval_set, tuple):
eval_set = [eval_set]
for i, (valid_x, valid_y) in enumerate(eval_set):
if valid_x is X and valid_y is y:
eval_set[i] = (valid_x, _y)
else:
eval_set[i] = (valid_x, self._le.transform(valid_y))
super(LGBMClassifier, self).fit(X, _y, sample_weight=sample_weight,
init_score=init_score, eval_set=eval_set,
eval_names=eval_names,
eval_sample_weight=eval_sample_weight,
eval_class_weight=eval_class_weight,
eval_init_score=eval_init_score,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks)
return self
fit.__doc__ = LGBMModel.fit.__doc__
def predict(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
result = self.predict_proba(X, raw_score, num_iteration,
pred_leaf, pred_contrib, **kwargs)
if raw_score or pred_leaf or pred_contrib:
return result
else:
class_index = np.argmax(result, axis=1)
return self._le.inverse_transform(class_index)
predict.__doc__ = LGBMModel.predict.__doc__
def predict_proba(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""Return the predicted probability for each class for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
num_iteration : int or None, optional (default=None)
Limit number of iterations in the prediction.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
Note
----
If you want to get more explanation for your model's predictions using SHAP values
like SHAP interaction values,
you can install shap package (https://github.com/slundberg/shap).
**kwargs : other parameters for the prediction
Returns
-------
predicted_probability : array-like of shape = [n_samples, n_classes]
The predicted probability for each class for each sample.
X_leaves : array-like of shape = [n_samples, n_trees * n_classes]
If ``pred_leaf=True``, the predicted leaf every tree for each sample.
X_SHAP_values : array-like of shape = [n_samples, (n_features + 1) * n_classes]
If ``pred_contrib=True``, the each feature contributions for each sample.
"""
result = super(LGBMClassifier, self).predict(X, raw_score, num_iteration,
pred_leaf, pred_contrib, **kwargs)
if self._n_classes > 2 or pred_leaf or pred_contrib:
return result
else:
return np.vstack((1. - result, result)).transpose()
@property
def classes_(self):
"""Get the class label array."""
if self._classes is None:
raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
return self._classes
@property
def n_classes_(self):
"""Get the number of classes."""
if self._n_classes is None:
raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
return self._n_classes
class LGBMRanker(LGBMModel):
"""LightGBM ranker."""
def fit(self, X, y,
sample_weight=None, init_score=None, group=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_init_score=None, eval_group=None, eval_metric=None,
eval_at=[1], early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto', callbacks=None):
# check group data
if group is None:
raise ValueError("Should set group for ranking task")
if eval_set is not None:
if eval_group is None:
raise ValueError("Eval_group cannot be None when eval_set is not None")
elif len(eval_group) != len(eval_set):
raise ValueError("Length of eval_group should be equal to eval_set")
elif (isinstance(eval_group, dict)
and any(i not in eval_group or eval_group[i] is None for i in range_(len(eval_group)))
or isinstance(eval_group, list)
and any(group is None for group in eval_group)):
raise ValueError("Should set group for all eval datasets for ranking task; "
"if you use dict, the index should start from 0")
self._eval_at = eval_at
super(LGBMRanker, self).fit(X, y, sample_weight=sample_weight,
init_score=init_score, group=group,
eval_set=eval_set, eval_names=eval_names,
eval_sample_weight=eval_sample_weight,
eval_init_score=eval_init_score, eval_group=eval_group,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks)
return self
_base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]
+ _base_doc[_base_doc.find('eval_init_score :'):])
_base_doc = fit.__doc__
_before_early_stop, _early_stop, _after_early_stop = _base_doc.partition('early_stopping_rounds :')
fit.__doc__ = (_before_early_stop
+ 'eval_at : list of int, optional (default=[1])\n'
+ ' ' * 12 + 'The evaluation positions of the specified metric.\n'
+ ' ' * 8 + _early_stop + _after_early_stop)
| 1 | 19,227 | LightGBM supports multiple validation sets, so please leave `sets`. | microsoft-LightGBM | cpp |