patch stringlengths 17–31.2k | y int64 1–1 | oldf stringlengths 0–2.21M | idx int64 1–1 | id int64 4.29k–68.4k | msg stringlengths 8–843 | proj stringclasses 212 values | lang stringclasses 9 values |
---|---|---|---|---|---|---|---|
@@ -230,7 +230,12 @@ public class ExecutionControllerUtils {
continue;
// case UNKNOWN:
case READY:
- node.setStatus(Status.KILLING);
+ // if flow status is EXECUTION_STOPPED due to e.g. pod failure, set sub node to KILLED
+ if (exFlow.getStatus()==Status.EXECUTION_STOPPED) {
+ node.setStatus(Status.KILLED);
+ } else {
+ node.setStatus(Status.KILLING);
+ }
break;
default:
node.setStatus(Status.FAILED);
| 1 |
/*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import static java.util.Objects.requireNonNull;
import azkaban.Constants.ConfigurationKeys;
import azkaban.alert.Alerter;
import azkaban.utils.AuthenticationUtils;
import azkaban.utils.Props;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utils for controlling executions.
*/
public class ExecutionControllerUtils {
private static final Logger logger = LoggerFactory.getLogger(
ExecutionControllerUtils.class);
private static final String SPARK_JOB_TYPE = "spark";
static final String OLD_APPLICATION_ID = "${application.id}";
// URLs coming from routing cluster info cannot use `${}` as a placeholder because it is already
// used for property substitution in Props class, which URLs are propagated through.
static final String NEW_APPLICATION_ID = "<application.id>";
// The regex to look for while fetching application ID from the Hadoop/Spark job log
private static final Pattern APPLICATION_ID_PATTERN = Pattern.compile("application_(\\d+_\\d+)");
// The regex to look for while validating the content from RM job link
private static final Pattern FAILED_TO_READ_APPLICATION_PATTERN = Pattern
.compile("Failed to read the application");
private static final Pattern INVALID_APPLICATION_ID_PATTERN = Pattern
.compile("Invalid Application ID");
/**
* If the current status of the execution is not one of the finished statuses, mark the execution
* as failed in the DB.
*
* @param executorLoader the executor loader
* @param alerterHolder the alerter holder
* @param flow the execution
* @param reason reason for finalizing the execution
* @param originalError the cause, if execution is being finalized because of an error
*/
public static void finalizeFlow(final ExecutorLoader executorLoader, final AlerterHolder
alerterHolder, final ExecutableFlow flow, final String reason,
@Nullable final Throwable originalError) {
boolean alertUser = true;
// First check if the execution in the datastore is finished.
try {
final ExecutableFlow dsFlow;
if (isFinished(flow)) {
dsFlow = flow;
} else {
dsFlow = executorLoader.fetchExecutableFlow(flow.getExecutionId());
// If it's marked finished, we're good. If not, we fail everything and then mark it
// finished.
if (!isFinished(dsFlow)) {
failEverything(dsFlow);
executorLoader.updateExecutableFlow(dsFlow);
}
}
if (flow.getEndTime() == -1) {
flow.setEndTime(System.currentTimeMillis());
executorLoader.updateExecutableFlow(dsFlow);
}
} catch (final ExecutorManagerException e) {
// If failed due to azkaban internal error, do not alert user.
alertUser = false;
logger.error("Failed to finalize flow " + flow.getExecutionId() + ", do not alert user.", e);
}
if (alertUser) {
alertUserOnFlowFinished(flow, alerterHolder, getFinalizeFlowReasons(reason, originalError));
}
}
/**
* When a flow is finished, alert the user as is configured in the execution options.
*
* @param flow the execution
* @param alerterHolder the alerter holder
* @param extraReasons the extra reasons for alerting
*/
public static void alertUserOnFlowFinished(final ExecutableFlow flow, final AlerterHolder
alerterHolder, final String[] extraReasons) {
final ExecutionOptions options = flow.getExecutionOptions();
final Alerter mailAlerter = alerterHolder.get("email");
if (flow.getStatus() != Status.SUCCEEDED) {
if (options.getFailureEmails() != null && !options.getFailureEmails().isEmpty()) {
try {
mailAlerter.alertOnError(flow, extraReasons);
} catch (final Exception e) {
logger.error("Failed to alert on error for execution " + flow.getExecutionId(), e);
}
}
if (options.getFlowParameters().containsKey("alert.type")) {
final String alertType = options.getFlowParameters().get("alert.type");
final Alerter alerter = alerterHolder.get(alertType);
if (alerter != null) {
try {
alerter.alertOnError(flow, extraReasons);
} catch (final Exception e) {
logger.error("Failed to alert on error by " + alertType + " for execution " + flow
.getExecutionId(), e);
}
} else {
logger.error("Alerter type " + alertType + " doesn't exist. Failed to alert.");
}
}
} else {
if (options.getSuccessEmails() != null && !options.getSuccessEmails().isEmpty()) {
try {
mailAlerter.alertOnSuccess(flow);
} catch (final Exception e) {
logger.error("Failed to alert on success for execution " + flow.getExecutionId(), e);
}
}
if (options.getFlowParameters().containsKey("alert.type")) {
final String alertType = options.getFlowParameters().get("alert.type");
final Alerter alerter = alerterHolder.get(alertType);
if (alerter != null) {
try {
alerter.alertOnSuccess(flow);
} catch (final Exception e) {
logger.error("Failed to alert on success by " + alertType + " for execution " + flow
.getExecutionId(), e);
}
} else {
logger.error("Alerter type " + alertType + " doesn't exist. Failed to alert.");
}
}
}
}
/**
* Alert the user when the flow has encountered the first error.
*
* @param flow the execution
* @param alerterHolder the alerter holder
*/
public static void alertUserOnFirstError(final ExecutableFlow flow,
final AlerterHolder alerterHolder) {
final ExecutionOptions options = flow.getExecutionOptions();
if (options.getNotifyOnFirstFailure()) {
logger.info("Alert on first error of execution " + flow.getExecutionId());
final Alerter mailAlerter = alerterHolder.get("email");
try {
mailAlerter.alertOnFirstError(flow);
} catch (final Exception e) {
logger.error("Failed to send first error email." + e.getMessage(), e);
}
if (options.getFlowParameters().containsKey("alert.type")) {
final String alertType = options.getFlowParameters().get("alert.type");
final Alerter alerter = alerterHolder.get(alertType);
if (alerter != null) {
try {
alerter.alertOnFirstError(flow);
} catch (final Exception e) {
logger.error("Failed to alert by " + alertType, e);
}
} else {
logger.error("Alerter type " + alertType + " doesn't exist. Failed to alert.");
}
}
}
}
/**
* Get the reasons to finalize the flow.
*
* @param reason the reason
* @param originalError the original error
* @return the reasons to finalize the flow
*/
public static String[] getFinalizeFlowReasons(final String reason, final Throwable
originalError) {
final List<String> reasons = new LinkedList<>();
reasons.add(reason);
if (originalError != null) {
reasons.add(ExceptionUtils.getStackTrace(originalError));
}
return reasons.toArray(new String[reasons.size()]);
}
/**
* Set the flow status to failed and fail every node inside the flow.
*
* @param exFlow the executable flow
*/
public static void failEverything(final ExecutableFlow exFlow) {
final long time = System.currentTimeMillis();
for (final ExecutableNode node : exFlow.getExecutableNodes()) {
switch (node.getStatus()) {
case SUCCEEDED:
case FAILED:
case KILLED:
case SKIPPED:
case DISABLED:
continue;
// case UNKNOWN:
case READY:
node.setStatus(Status.KILLING);
break;
default:
node.setStatus(Status.FAILED);
break;
}
if (node.getStartTime() == -1) {
node.setStartTime(time);
}
if (node.getEndTime() == -1) {
node.setEndTime(time);
}
}
if (exFlow.getEndTime() == -1) {
exFlow.setEndTime(time);
}
if (!Status.isStatusFinished(exFlow.getStatus())) {
exFlow.setStatus(Status.FAILED);
}
}
/**
* Check if the flow status is finished.
*
* @param flow the executable flow
* @return the boolean
*/
public static boolean isFinished(final ExecutableFlow flow) {
switch (flow.getStatus()) {
case SUCCEEDED:
case FAILED:
case KILLED:
return true;
default:
return false;
}
}
/**
* Dynamically create the job link url. Construct the job link url from resource manager url.
* If it's valid, just return the job link url. Otherwise, construct the job link url from
* Hadoop/Spark job history server.
*
* @param exFlow The executable flow.
* @param jobId The job id.
* @param applicationId The application id.
* @param azkProps The azkaban props.
* @return the job link url.
*/
public static String createJobLinkUrl(final ExecutableFlow exFlow, final String jobId,
final String applicationId, final Props azkProps) {
if (applicationId == null) {
return null;
}
final ExecutableNode node = exFlow.getExecutableNodePath(jobId);
final boolean executableNodeFound = (node != null) ? true : false;
String resourceManagerJobUrl = null;
String sparkHistoryServerUrl = null;
String jobHistoryServerUrl = null;
final String applicationPlaceholder;
if (executableNodeFound && node.getClusterInfo() != null) {
// use the information of the cluster where the job is previously routed to
final ClusterInfo cluster = node.getClusterInfo();
applicationPlaceholder = NEW_APPLICATION_ID;
resourceManagerJobUrl = cluster.resourceManagerURL;
sparkHistoryServerUrl = cluster.sparkHistoryServerURL;
jobHistoryServerUrl = cluster.historyServerURL;
} else {
// fall back to web server's own configuration if cluster is missing for this job
applicationPlaceholder = OLD_APPLICATION_ID;
if (azkProps.containsKey(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL)) {
resourceManagerJobUrl = azkProps.getString(ConfigurationKeys.RESOURCE_MANAGER_JOB_URL);
}
if (azkProps.containsKey(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL)) {
sparkHistoryServerUrl = azkProps.getString(ConfigurationKeys.SPARK_HISTORY_SERVER_JOB_URL);
}
if (azkProps.containsKey(ConfigurationKeys.HISTORY_SERVER_JOB_URL)) {
jobHistoryServerUrl = azkProps.getString(ConfigurationKeys.HISTORY_SERVER_JOB_URL);
}
}
if (resourceManagerJobUrl == null || sparkHistoryServerUrl == null || jobHistoryServerUrl == null) {
logger.info("Missing Resource Manager, Spark History Server or History Server URL");
return null;
}
final URL url;
final String jobLinkUrl;
boolean isRMJobLinkValid = true;
try {
url = new URL(resourceManagerJobUrl.replace(applicationPlaceholder, applicationId));
final String keytabPrincipal = requireNonNull(
azkProps.getString(ConfigurationKeys.AZKABAN_KERBEROS_PRINCIPAL));
final String keytabPath = requireNonNull(azkProps.getString(ConfigurationKeys
.AZKABAN_KEYTAB_PATH));
final HttpURLConnection connection = AuthenticationUtils.loginAuthenticatedURL(url,
keytabPrincipal, keytabPath);
try (final BufferedReader in = new BufferedReader(
new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
String inputLine;
while ((inputLine = in.readLine()) != null) {
if (FAILED_TO_READ_APPLICATION_PATTERN.matcher(inputLine).find()) {
logger.info("RM job link has expired for application_" + applicationId);
isRMJobLinkValid = false;
break;
}
if (INVALID_APPLICATION_ID_PATTERN.matcher(inputLine).find()) {
logger.info("Invalid application id application_" + applicationId);
return null;
}
}
}
} catch (final Exception e) {
logger.error("Failed to get job link for application_" + applicationId, e);
return null;
}
if (isRMJobLinkValid) {
jobLinkUrl = url.toString();
} else {
// If RM job url has expired, build the url to the JHS or SHS instead.
if (!executableNodeFound) {
logger.error(
"Failed to create job url. Job " + jobId + " doesn't exist in " + exFlow
.getExecutionId());
return null;
}
if (node.getType().equals(SPARK_JOB_TYPE)) {
jobLinkUrl = sparkHistoryServerUrl.replace(applicationPlaceholder, applicationId);
} else {
jobLinkUrl = jobHistoryServerUrl.replace(applicationPlaceholder, applicationId);
}
}
logger.info("Job link url is " + jobLinkUrl + " for execution " + exFlow.getExecutionId() +
", job " + jobId);
return jobLinkUrl;
}
/**
* Find all the application ids the job log data contains by matching "application_<id>" pattern.
* Application ids are returned in the order they appear.
*
* @param logData The log data.
* @return The set of application ids found.
*/
public static Set<String> findApplicationIdsFromLog(final String logData) {
final Set<String> applicationIds = new LinkedHashSet<>();
final Matcher matcher = APPLICATION_ID_PATTERN.matcher(logData);
while (matcher.find()) {
final String appId = matcher.group(1);
applicationIds.add(appId);
}
logger.info("Application Ids found: " + applicationIds.toString());
return applicationIds;
}
/**
* Create a string by combining the cluster name with the execution Id.
*
* @param clusterName name of the azkaban cluster
* @param executionId execution id of a flow
* @return
*/
public static String clusterQualifiedExecId(final String clusterName, final int executionId) {
requireNonNull(clusterName, "cluster name must not be null");
return String.format("%s-%d", clusterName, executionId);
}
}
| 1 | 22,321 | Just to confirm, this will take care of all the nodes in graph. right? | azkaban-azkaban | java |
@@ -175,10 +175,16 @@ namespace TestPlatform.CrossPlatEngine.UnitTests.Hosting
public void GetTestHostProcessStartInfoShouldIncludeConnectionInfo()
{
var connectionInfo = new TestRunnerConnectionInfo { Port = 123 };
+ var parentProcessId = 101;
+
+ mockProcessHelper.Setup(ph => ph.GetCurrentProcessId()).Returns(parentProcessId);
var startInfo = this.dotnetHostManager.GetTestHostProcessStartInfo(this.testSource, null, connectionInfo);
StringAssert.Contains(startInfo.Arguments, "--port " + connectionInfo.Port);
+
+ StringAssert.Contains(startInfo.Arguments, string.Format("{0} {1}", Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Constants.ParentProcessIdOption, parentProcessId));
+
}
[TestMethod]
| 1 |
// Copyright (c) Microsoft. All rights reserved.
namespace TestPlatform.CrossPlatEngine.UnitTests.Hosting
{
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Security.Cryptography;
using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Helpers.Interfaces;
using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Hosting;
using Microsoft.VisualStudio.TestPlatform.ObjectModel;
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client.Interfaces;
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Engine;
using Microsoft.VisualStudio.TestPlatform.Utilities.Helpers.Interfaces;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Moq;
using System.Runtime.InteropServices;
[TestClass]
public class DotnetTestHostManagerTests
{
private const string DefaultDotnetPath = "c:\\tmp\\dotnet.exe";
private readonly Mock<ITestHostLauncher> mockTestHostLauncher;
private readonly TestableDotnetTestHostManager dotnetHostManager;
private readonly Mock<IProcessHelper> mockProcessHelper;
private readonly Mock<IFileHelper> mockFileHelper;
private readonly TestRunnerConnectionInfo defaultConnectionInfo;
private readonly string[] testSource = { "test.dll" };
public DotnetTestHostManagerTests()
{
this.mockTestHostLauncher = new Mock<ITestHostLauncher>();
this.mockProcessHelper = new Mock<IProcessHelper>();
this.mockFileHelper = new Mock<IFileHelper>();
this.defaultConnectionInfo = default(TestRunnerConnectionInfo);
this.dotnetHostManager = new TestableDotnetTestHostManager(
this.mockTestHostLauncher.Object,
this.mockProcessHelper.Object,
this.mockFileHelper.Object);
// Setup a dummy current process for tests
this.mockProcessHelper.Setup(ph => ph.GetCurrentProcessFileName()).Returns(DefaultDotnetPath);
this.mockProcessHelper.Setup(ph => ph.GetTestEngineDirectory()).Returns(DefaultDotnetPath);
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldThrowIfSourceIsNull()
{
Action action = () => this.dotnetHostManager.GetTestHostProcessStartInfo(null, null, this.defaultConnectionInfo);
Assert.ThrowsException<ArgumentNullException>(action);
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldThrowIfMultipleSourcesAreProvided()
{
var sources = new[] { "test1.dll", "test2.dll" };
Action action = () => this.dotnetHostManager.GetTestHostProcessStartInfo(sources, null, this.defaultConnectionInfo);
Assert.ThrowsException<InvalidOperationException>(action);
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldInvokeDotnetCommandline()
{
this.mockProcessHelper.Setup(ph => ph.GetCurrentProcessFileName()).Returns(DefaultDotnetPath);
var startInfo = this.GetDefaultStartInfo();
Assert.AreEqual(DefaultDotnetPath, startInfo.FileName);
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldInvokeDotnetXPlatOnLinux()
{
this.mockProcessHelper.Setup(ph => ph.GetCurrentProcessFileName()).Returns("/tmp/dotnet");
var startInfo = this.GetDefaultStartInfo();
Assert.AreEqual("/tmp/dotnet", startInfo.FileName);
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldInvokeDotnetOnWindows()
{
this.mockProcessHelper.Setup(ph => ph.GetCurrentProcessFileName()).Returns("c:\\tmp\\vstest.console.exe");
var startInfo = this.GetDefaultStartInfo();
Assert.AreEqual("dotnet.exe", startInfo.FileName);
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldInvokeDotnetExec()
{
var startInfo = this.GetDefaultStartInfo();
StringAssert.StartsWith(startInfo.Arguments, "exec");
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldAddRuntimeConfigJsonIfExists()
{
this.mockFileHelper.Setup(fh => fh.Exists("test.runtimeconfig.json")).Returns(true);
var startInfo = this.GetDefaultStartInfo();
StringAssert.Contains(startInfo.Arguments, "--runtimeconfig \"test.runtimeconfig.json\"");
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldNotAddRuntimeConfigJsonIfNotExists()
{
this.mockFileHelper.Setup(fh => fh.Exists("test.runtimeconfig.json")).Returns(false);
var startInfo = this.GetDefaultStartInfo();
Assert.IsFalse(startInfo.Arguments.Contains("--runtimeconfig \"test.runtimeconfig.json\""));
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldAddDepsFileJsonIfExists()
{
this.mockFileHelper.Setup(fh => fh.Exists("test.deps.json")).Returns(true);
var startInfo = this.GetDefaultStartInfo();
StringAssert.Contains(startInfo.Arguments, "--depsfile \"test.deps.json\"");
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldNotAddDepsFileJsonIfNotExists()
{
this.mockFileHelper.Setup(fh => fh.Exists("test.deps.json")).Returns(false);
var startInfo = this.GetDefaultStartInfo();
Assert.IsFalse(startInfo.Arguments.Contains("--depsfile \"test.deps.json\""));
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldProvidePathToTestHostForDesktopTarget()
{
this.mockProcessHelper.Setup(ph => ph.GetCurrentProcessFileName()).Returns("c:\\tmp\\vstest.console.exe");
var startInfo = this.GetDefaultStartInfo();
StringAssert.Contains(startInfo.Arguments, "c:\\tmp\\NetCore\\testhost.dll");
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldProvidePathToTestHostForNetCoreTarget()
{
this.mockProcessHelper.Setup(ph => ph.GetCurrentProcessFileName()).Returns("/tmp/dotnet");
this.mockProcessHelper.Setup(ph => ph.GetTestEngineDirectory()).Returns("/tmp/vstest");
var startInfo = this.GetDefaultStartInfo();
// Path.GetDirectoryName returns platform specific path separator char
StringAssert.Contains(startInfo.Arguments, this.GetTesthostPath("/tmp/vstest"));
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldIncludeConnectionInfo()
{
var connectionInfo = new TestRunnerConnectionInfo { Port = 123 };
var startInfo = this.dotnetHostManager.GetTestHostProcessStartInfo(this.testSource, null, connectionInfo);
StringAssert.Contains(startInfo.Arguments, "--port " + connectionInfo.Port);
}
[TestMethod]
public void GetTestHostProcessStartInfoShouldIncludeEnvironmentVariables()
{
var environmentVariables = new Dictionary<string, string> { { "k1", "v1" }, { "k2", "v2" } };
var startInfo = this.dotnetHostManager.GetTestHostProcessStartInfo(this.testSource, environmentVariables, this.defaultConnectionInfo);
Assert.AreEqual(environmentVariables, startInfo.EnvironmentVariables);
}
[TestMethod]
public void LaunchTestHostShouldLaunchProcessWithNullEnvironmentVariablesOrArgs()
{
this.mockTestHostLauncher.Setup(thl => thl.LaunchTestHost(It.IsAny<TestProcessStartInfo>())).Returns(111);
var startInfo = this.GetDefaultStartInfo();
var processId = this.dotnetHostManager.LaunchTestHost(startInfo);
Assert.AreEqual(111, processId);
}
[TestMethod]
public void LaunchTestHostShouldLaunchProcessWithConnectionInfo()
{
this.mockProcessHelper.Setup(ph => ph.GetTestEngineDirectory()).Returns("/tmp/vstest");
var startInfo = this.GetDefaultStartInfo();
var expectedArgs = "exec \"" + this.GetTesthostPath("/tmp/vstest") + "\" --port 0";
this.dotnetHostManager.LaunchTestHost(startInfo);
this.mockTestHostLauncher.Verify(thl => thl.LaunchTestHost(It.Is<TestProcessStartInfo>(x => x.Arguments.Equals(expectedArgs))), Times.Once);
}
[TestMethod]
public void LaunchTestHostShouldLaunchProcessWithEnvironmentVariables()
{
var variables = new Dictionary<string, string> { { "k1", "v1" }, { "k2", "v2" } };
var startInfo = new TestProcessStartInfo { EnvironmentVariables = variables };
this.dotnetHostManager.LaunchTestHost(startInfo);
this.mockTestHostLauncher.Verify(thl => thl.LaunchTestHost(It.Is<TestProcessStartInfo>(x => x.EnvironmentVariables.Equals(variables))), Times.Once);
}
[TestMethod]
public void DotnetTestHostManagedShouldNotBeShared()
{
Assert.IsFalse(this.dotnetHostManager.Shared);
}
[TestMethod]
public void GetTestHostProcessStartInfoOnWindowsForValidPathReturnsFullPathOfDotnetHost()
{
// To validate the else part, set current process to exe other than dotnet
this.mockProcessHelper.Setup(ph => ph.GetCurrentProcessFileName()).Returns("vstest.console.exe");
char separator = ';';
var dotnetExeName = "dotnet.exe";
if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
{
separator = ':';
dotnetExeName = "dotnet";
}
var paths = Environment.GetEnvironmentVariable("PATH").Split(separator);
var acceptablePath = Path.Combine(paths[0], dotnetExeName);
this.mockFileHelper.Setup(fh => fh.Exists(acceptablePath)).Returns(true);
var startInfo = this.GetDefaultStartInfo();
Assert.AreEqual(acceptablePath, startInfo.FileName);
}
[TestMethod]
public void GetTestHostProcessStartInfoOnWindowsForInValidPathReturnsDotnet()
{
// To validate the else part, set current process to exe other than dotnet
this.mockProcessHelper.Setup(ph => ph.GetCurrentProcessFileName()).Returns("vstest.console.exe");
var dotnetExeName = RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? "dotnet.exe" : "dotnet";
var startInfo = this.GetDefaultStartInfo();
Assert.AreEqual(dotnetExeName, startInfo.FileName);
}
private string GetTesthostPath(string engineDirectory)
{
// testhost.dll will be picked up from the same path as vstest.console.dll. In the test, we are setting up
// the path to current assembly location.
return Path.Combine(engineDirectory, "testhost.dll");
}
private TestProcessStartInfo GetDefaultStartInfo()
{
var startInfo = this.dotnetHostManager.GetTestHostProcessStartInfo(
this.testSource,
null,
this.defaultConnectionInfo);
return startInfo;
}
}
internal class TestableDotnetTestHostManager : DotnetTestHostManager
{
public TestableDotnetTestHostManager(ITestHostLauncher testHostLauncher, IProcessHelper processHelper, IFileHelper fileHelper)
: base(testHostLauncher, processHelper, fileHelper)
{ }
}
[TestClass]
public class DefaultTestHostLauncherTests
{
[TestMethod]
public void DefaultTestHostLauncherIsDebugShouldBeFalse()
{
var hostLauncher = new DefaultTestHostLauncher();
Assert.IsFalse(hostLauncher.IsDebug);
}
[TestMethod]
public void DefaultTestHostLauncherShouldStartTestProcess()
{
var startInfo = new TestProcessStartInfo { FileName = "testhost.exe", Arguments = "a1", WorkingDirectory = "w" };
var currentProcess = Process.GetCurrentProcess();
var mockProcessHelper = new Mock<IProcessHelper>();
mockProcessHelper.Setup(ph => ph.LaunchProcess(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<string>()))
.Returns(currentProcess);
var hostLauncher = new DefaultTestHostLauncher(mockProcessHelper.Object);
var processId = hostLauncher.LaunchTestHost(startInfo);
Assert.AreEqual(currentProcess.Id, processId);
mockProcessHelper.Verify(ph => ph.LaunchProcess("testhost.exe", "a1", "w"), Times.Once);
}
}
}
| 1 | 11,211 | nit: remove this line. All "arrange" may be in one code block. | microsoft-vstest | .cs |
@@ -14,10 +14,17 @@ import (
"runtime"
+ "bytes"
"restic/debug"
"restic/fs"
)
+// Xattr is a tuple storing the xattr name and value.
+type Xattr struct {
+ XattrName string `json:"xattrname"`
+ XattrValue []byte `json:"xattrvalue"`
+}
+
// Node is a file, directory or other item in a backup.
type Node struct {
Name string `json:"name"`
| 1 |
package restic
import (
"encoding/json"
"fmt"
"os"
"os/user"
"strconv"
"sync"
"syscall"
"time"
"restic/errors"
"runtime"
"restic/debug"
"restic/fs"
)
// Node is a file, directory or other item in a backup.
type Node struct {
Name string `json:"name"`
Type string `json:"type"`
Mode os.FileMode `json:"mode,omitempty"`
ModTime time.Time `json:"mtime,omitempty"`
AccessTime time.Time `json:"atime,omitempty"`
ChangeTime time.Time `json:"ctime,omitempty"`
UID uint32 `json:"uid"`
GID uint32 `json:"gid"`
User string `json:"user,omitempty"`
Group string `json:"group,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Size uint64 `json:"size,omitempty"`
Links uint64 `json:"links,omitempty"`
LinkTarget string `json:"linktarget,omitempty"`
Device uint64 `json:"device,omitempty"`
Content IDs `json:"content"`
Subtree *ID `json:"subtree,omitempty"`
Error string `json:"error,omitempty"`
Path string `json:"-"`
}
func (node Node) String() string {
switch node.Type {
case "file":
return fmt.Sprintf("%s %5d %5d %6d %s %s",
node.Mode, node.UID, node.GID, node.Size, node.ModTime, node.Name)
case "dir":
return fmt.Sprintf("%s %5d %5d %6d %s %s",
node.Mode|os.ModeDir, node.UID, node.GID, node.Size, node.ModTime, node.Name)
}
return fmt.Sprintf("<Node(%s) %s>", node.Type, node.Name)
}
// NodeFromFileInfo returns a new node from the given path and FileInfo.
func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) {
mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
node := &Node{
Path: path,
Name: fi.Name(),
Mode: fi.Mode() & mask,
ModTime: fi.ModTime(),
}
node.Type = nodeTypeFromFileInfo(fi)
if node.Type == "file" {
node.Size = uint64(fi.Size())
}
err := node.fillExtra(path, fi)
return node, err
}
func nodeTypeFromFileInfo(fi os.FileInfo) string {
switch fi.Mode() & (os.ModeType | os.ModeCharDevice) {
case 0:
return "file"
case os.ModeDir:
return "dir"
case os.ModeSymlink:
return "symlink"
case os.ModeDevice | os.ModeCharDevice:
return "chardev"
case os.ModeDevice:
return "dev"
case os.ModeNamedPipe:
return "fifo"
case os.ModeSocket:
return "socket"
}
return ""
}
// CreateAt creates the node at the given path and restores all the meta data.
func (node *Node) CreateAt(path string, repo Repository) error {
debug.Log("create node %v at %v", node.Name, path)
switch node.Type {
case "dir":
if err := node.createDirAt(path); err != nil {
return err
}
case "file":
if err := node.createFileAt(path, repo); err != nil {
return err
}
case "symlink":
if err := node.createSymlinkAt(path); err != nil {
return err
}
case "dev":
if err := node.createDevAt(path); err != nil {
return err
}
case "chardev":
if err := node.createCharDevAt(path); err != nil {
return err
}
case "fifo":
if err := node.createFifoAt(path); err != nil {
return err
}
case "socket":
return nil
default:
return errors.Errorf("filetype %q not implemented!\n", node.Type)
}
err := node.restoreMetadata(path)
if err != nil {
debug.Log("restoreMetadata(%s) error %v", path, err)
}
return err
}
func (node Node) restoreMetadata(path string) error {
var err error
err = lchown(path, int(node.UID), int(node.GID))
if err != nil {
return errors.Wrap(err, "Lchown")
}
if node.Type != "symlink" {
err = fs.Chmod(path, node.Mode)
if err != nil {
return errors.Wrap(err, "Chmod")
}
}
if node.Type != "dir" {
err = node.RestoreTimestamps(path)
if err != nil {
debug.Log("error restoring timestamps for dir %v: %v", path, err)
return err
}
}
return nil
}
func (node Node) RestoreTimestamps(path string) error {
var utimes = [...]syscall.Timespec{
syscall.NsecToTimespec(node.AccessTime.UnixNano()),
syscall.NsecToTimespec(node.ModTime.UnixNano()),
}
if node.Type == "symlink" {
return node.restoreSymlinkTimestamps(path, utimes)
}
if err := syscall.UtimesNano(path, utimes[:]); err != nil {
return errors.Wrap(err, "UtimesNano")
}
return nil
}
func (node Node) createDirAt(path string) error {
err := fs.Mkdir(path, node.Mode)
if err != nil && !os.IsExist(err) {
return errors.Wrap(err, "Mkdir")
}
return nil
}
func (node Node) createFileAt(path string, repo Repository) error {
f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
defer f.Close()
if err != nil {
return errors.Wrap(err, "OpenFile")
}
var buf []byte
for _, id := range node.Content {
size, err := repo.LookupBlobSize(id, DataBlob)
if err != nil {
return err
}
buf = buf[:cap(buf)]
if uint(len(buf)) < size {
buf = NewBlobBuffer(int(size))
}
n, err := repo.LoadBlob(DataBlob, id, buf)
if err != nil {
return err
}
buf = buf[:n]
_, err = f.Write(buf)
if err != nil {
return errors.Wrap(err, "Write")
}
}
return nil
}
func (node Node) createSymlinkAt(path string) error {
// Windows does not allow non-admins to create soft links.
if runtime.GOOS == "windows" {
return nil
}
err := fs.Symlink(node.LinkTarget, path)
if err != nil {
return errors.Wrap(err, "Symlink")
}
return nil
}
func (node *Node) createDevAt(path string) error {
return mknod(path, syscall.S_IFBLK|0600, int(node.Device))
}
func (node *Node) createCharDevAt(path string) error {
return mknod(path, syscall.S_IFCHR|0600, int(node.Device))
}
func (node *Node) createFifoAt(path string) error {
return mkfifo(path, 0600)
}
func (node Node) MarshalJSON() ([]byte, error) {
if node.ModTime.Year() < 0 || node.ModTime.Year() > 9999 {
err := errors.Errorf("node %v has invalid ModTime year %d: %v",
node.Path, node.ModTime.Year(), node.ModTime)
return nil, err
}
if node.ChangeTime.Year() < 0 || node.ChangeTime.Year() > 9999 {
err := errors.Errorf("node %v has invalid ChangeTime year %d: %v",
node.Path, node.ChangeTime.Year(), node.ChangeTime)
return nil, err
}
if node.AccessTime.Year() < 0 || node.AccessTime.Year() > 9999 {
err := errors.Errorf("node %v has invalid AccessTime year %d: %v",
node.Path, node.AccessTime.Year(), node.AccessTime)
return nil, err
}
type nodeJSON Node
nj := nodeJSON(node)
name := strconv.Quote(node.Name)
nj.Name = name[1 : len(name)-1]
return json.Marshal(nj)
}
func (node *Node) UnmarshalJSON(data []byte) error {
type nodeJSON Node
nj := (*nodeJSON)(node)
err := json.Unmarshal(data, nj)
if err != nil {
return errors.Wrap(err, "Unmarshal")
}
nj.Name, err = strconv.Unquote(`"` + nj.Name + `"`)
return errors.Wrap(err, "Unquote")
}
func (node Node) Equals(other Node) bool {
if node.Name != other.Name {
return false
}
if node.Type != other.Type {
return false
}
if node.Mode != other.Mode {
return false
}
if node.ModTime != other.ModTime {
return false
}
if node.AccessTime != other.AccessTime {
return false
}
if node.ChangeTime != other.ChangeTime {
return false
}
if node.UID != other.UID {
return false
}
if node.GID != other.GID {
return false
}
if node.User != other.User {
return false
}
if node.Group != other.Group {
return false
}
if node.Inode != other.Inode {
return false
}
if node.Size != other.Size {
return false
}
if node.Links != other.Links {
return false
}
if node.LinkTarget != other.LinkTarget {
return false
}
if node.Device != other.Device {
return false
}
if !node.sameContent(other) {
return false
}
if node.Subtree != nil {
if other.Subtree == nil {
return false
}
if !node.Subtree.Equal(*other.Subtree) {
return false
}
} else {
if other.Subtree != nil {
return false
}
}
if node.Error != other.Error {
return false
}
return true
}
func (node Node) sameContent(other Node) bool {
if node.Content == nil {
return other.Content == nil
}
if other.Content == nil {
return false
}
if len(node.Content) != len(other.Content) {
return false
}
for i := 0; i < len(node.Content); i++ {
if !node.Content[i].Equal(other.Content[i]) {
return false
}
}
return true
}
// IsNewer returns true of the file has been updated since the last Stat().
func (node *Node) IsNewer(path string, fi os.FileInfo) bool {
if node.Type != "file" {
debug.Log("node %v is newer: not file", path)
return true
}
tpe := nodeTypeFromFileInfo(fi)
if node.Name != fi.Name() || node.Type != tpe {
debug.Log("node %v is newer: name or type changed", path)
return true
}
size := uint64(fi.Size())
extendedStat, ok := toStatT(fi.Sys())
if !ok {
if node.ModTime != fi.ModTime() ||
node.Size != size {
debug.Log("node %v is newer: timestamp or size changed", path)
return true
}
return false
}
inode := extendedStat.ino()
if node.ModTime != fi.ModTime() ||
node.ChangeTime != changeTime(extendedStat) ||
node.Inode != uint64(inode) ||
node.Size != size {
debug.Log("node %v is newer: timestamp, size or inode changed", path)
return true
}
debug.Log("node %v is not newer", path)
return false
}
func (node *Node) fillUser(stat statT) error {
node.UID = stat.uid()
node.GID = stat.gid()
username, err := lookupUsername(strconv.Itoa(int(stat.uid())))
if err != nil {
return err
}
node.User = username
return nil
}
var (
uidLookupCache = make(map[string]string)
uidLookupCacheMutex = sync.RWMutex{}
)
func lookupUsername(uid string) (string, error) {
uidLookupCacheMutex.RLock()
value, ok := uidLookupCache[uid]
uidLookupCacheMutex.RUnlock()
if ok {
return value, nil
}
username := ""
u, err := user.LookupId(uid)
if err == nil {
username = u.Username
}
uidLookupCacheMutex.Lock()
uidLookupCache[uid] = username
uidLookupCacheMutex.Unlock()
return username, nil
}
func (node *Node) fillExtra(path string, fi os.FileInfo) error {
stat, ok := toStatT(fi.Sys())
if !ok {
return nil
}
node.Inode = uint64(stat.ino())
node.fillTimes(stat)
var err error
if err = node.fillUser(stat); err != nil {
return err
}
switch node.Type {
case "file":
node.Size = uint64(stat.size())
node.Links = uint64(stat.nlink())
case "dir":
case "symlink":
node.LinkTarget, err = fs.Readlink(path)
err = errors.Wrap(err, "Readlink")
case "dev":
node.Device = uint64(stat.rdev())
case "chardev":
node.Device = uint64(stat.rdev())
case "fifo":
case "socket":
default:
err = errors.Errorf("invalid node type %q", node.Type)
}
return err
}
type statT interface {
dev() uint64
ino() uint64
nlink() uint64
uid() uint32
gid() uint32
rdev() uint64
size() int64
atim() syscall.Timespec
mtim() syscall.Timespec
ctim() syscall.Timespec
}
func mkfifo(path string, mode uint32) (err error) {
return mknod(path, mode|syscall.S_IFIFO, 0)
}
func (node *Node) fillTimes(stat statT) {
ctim := stat.ctim()
atim := stat.atim()
node.ChangeTime = time.Unix(ctim.Unix())
node.AccessTime = time.Unix(atim.Unix())
}
func changeTime(stat statT) time.Time {
ctim := stat.ctim()
return time.Unix(ctim.Unix())
}
| 1 | 7,751 | This name is not a good fit, what about `ExtendedAttribute`? Or even just `Attribute`? | restic-restic | go |
@@ -0,0 +1,18 @@
+#include "test_assert.h"
+
+int testing_fails = 0;
+
+void TestFail(const char *expval, const char *val, const char *exp,
+ const char *file, int line) {
+ TEST_OUTPUT_LINE("VALUE: \"%s\"", expval);
+ TEST_OUTPUT_LINE("EXPECTED: \"%s\"", val);
+ TEST_OUTPUT_LINE("TEST FAILED: %s:%d, %s", file, line, exp);
+ assert(0);
+ testing_fails++;
+}
+
+void TestEqStr(const char *expval, const char *val, const char *exp,
+ const char *file, int line) {
+ if (strcmp(expval, val) != 0) { TestFail(expval, val, exp, file, line); }
+}
+ | 1 | 1 | 13,989 | lets keep our "testing framework" header only if possible. since there is an `assert(0)` in there, it is intended to halt on the first test failure. | google-flatbuffers | java |
|
@@ -26,9 +26,8 @@ import (
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
- accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
+ "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
- "github.com/iotexproject/iotex-core/action/protocol/vote"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block" | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package rolldpos
import (
"encoding/hex"
"fmt"
"math/big"
"net"
"sync"
"testing"
"time"
"github.com/facebookgo/clock"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/action/protocol/vote"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
cp "github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/endorsement"
"github.com/iotexproject/iotex-core/p2p/node"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/protogen/iotextypes"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_actpool"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/test/testaddress"
"github.com/iotexproject/iotex-core/testutil"
)
type addrKeyPair struct {
pubKey keypair.PublicKey
priKey keypair.PrivateKey
encodedAddr string
}
func TestNewRollDPoS(t *testing.T) {
t.Parallel()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cfg := config.Default
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
t.Run("normal", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetBlockchain(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
})
t.Run("mock-clock", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetBlockchain(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock.NewMock()).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
_, ok := r.ctx.clock.(*clock.Mock)
assert.True(t, ok)
})
t.Run("root chain API", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetBlockchain(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock.NewMock()).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
})
t.Run("missing-dep", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
RegisterProtocol(rp).
Build()
assert.Error(t, err)
assert.Nil(t, r)
})
}
func makeBlock(t *testing.T, accountIndex, numOfEndosements int, makeInvalidEndorse bool, height int) *block.Block {
unixTime := 1500000000
blkTime := int64(-1)
if height != 9 {
height = 9
blkTime = int64(-7723372030)
}
timeT := time.Unix(blkTime, 0)
rap := block.RunnableActionsBuilder{}
ra := rap.
SetHeight(uint64(height)).
SetTimeStamp(timeT).
Build(identityset.PrivateKey(accountIndex).PublicKey())
blk, err := block.NewBuilder(ra).
SetVersion(1).
SetReceiptRoot(hash.Hash256b([]byte("hello, world!"))).
SetDeltaStateDigest(hash.Hash256b([]byte("world, hello!"))).
SetPrevBlockHash(hash.Hash256b([]byte("hello, block!"))).
SignAndBuild(identityset.PrivateKey(accountIndex))
require.NoError(t, err)
footerForBlk := &block.Footer{}
typesFooter := iotextypes.BlockFooter{}
for i := 0; i < numOfEndosements; i++ {
timeTime := time.Unix(int64(unixTime), 0)
hs := blk.HashBlock()
var consensusVote *ConsensusVote
if makeInvalidEndorse {
consensusVote = NewConsensusVote(hs[:], LOCK)
} else {
consensusVote = NewConsensusVote(hs[:], COMMIT)
}
en, err := endorsement.Endorse(identityset.PrivateKey(i), consensusVote, timeTime)
require.NoError(t, err)
enProto, err := en.Proto()
require.NoError(t, err)
typesFooter.Endorsements = append(typesFooter.Endorsements, enProto)
}
ts, err := ptypes.TimestampProto(time.Unix(int64(unixTime), 0))
require.NoError(t, err)
typesFooter.Timestamp = ts
require.NotNil(t, typesFooter.Timestamp)
err = footerForBlk.ConvertFromBlockFooterPb(&typesFooter)
require.NoError(t, err)
blk.Footer = *footerForBlk
return &blk
}
func TestValidateBlockFooter(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
candidates := make([]string, 5)
for i := 0; i < len(candidates); i++ {
candidates[i] = identityset.Address(i).String()
}
clock := clock.NewMock()
blockHeight := uint64(8)
footer := &block.Footer{}
blockchain := mock_blockchain.NewMockBlockchain(ctrl)
blockchain.EXPECT().GenesisTimestamp().Return(int64(1500000000)).Times(5)
blockchain.EXPECT().BlockFooterByHeight(blockHeight).Return(footer, nil).Times(5)
blockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return([]*state.Candidate{
{Address: candidates[0]},
{Address: candidates[1]},
{Address: candidates[2]},
{Address: candidates[3]},
{Address: candidates[4]},
}, nil).AnyTimes()
sk1 := identityset.PrivateKey(1)
cfg := config.Default
cfg.Genesis.NumDelegates = 4
cfg.Genesis.NumSubEpochs = 1
cfg.Genesis.BlockInterval = 10 * time.Second
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(1).String()).
SetPriKey(sk1).
SetBlockchain(blockchain).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
require.NotNil(t, r)
// all right
blk := makeBlock(t, 1, 4, false, 9)
err = r.ValidateBlockFooter(blk)
require.NoError(t, err)
// Proposer is wrong
blk = makeBlock(t, 0, 4, false, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// Not enough endorsements
blk = makeBlock(t, 1, 2, false, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// round information is wrong
blk = makeBlock(t, 1, 4, false, 0)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// Some endorsement is invalid
blk = makeBlock(t, 1, 4, true, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
}
func TestRollDPoS_Metrics(t *testing.T) {
t.Parallel()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
candidates := make([]string, 5)
for i := 0; i < len(candidates); i++ {
candidates[i] = identityset.Address(i).String()
}
clock := clock.NewMock()
blockHeight := uint64(8)
footer := &block.Footer{}
blockchain := mock_blockchain.NewMockBlockchain(ctrl)
blockchain.EXPECT().TipHeight().Return(blockHeight).Times(1)
blockchain.EXPECT().GenesisTimestamp().Return(int64(1500000000)).Times(2)
blockchain.EXPECT().BlockFooterByHeight(blockHeight).Return(footer, nil).Times(2)
blockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return([]*state.Candidate{
{Address: candidates[0]},
{Address: candidates[1]},
{Address: candidates[2]},
{Address: candidates[3]},
{Address: candidates[4]},
}, nil).AnyTimes()
sk1 := identityset.PrivateKey(1)
cfg := config.Default
cfg.Genesis.NumDelegates = 4
cfg.Genesis.NumSubEpochs = 1
cfg.Genesis.BlockInterval = 10 * time.Second
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(1).String()).
SetPriKey(sk1).
SetBlockchain(blockchain).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
require.NotNil(t, r)
clock.Add(r.ctx.RoundCalc().BlockInterval())
r.ctx.round, err = r.ctx.RoundCalc().UpdateRound(r.ctx.round, blockHeight+1, clock.Now())
require.NoError(t, err)
m, err := r.Metrics()
require.NoError(t, err)
assert.Equal(t, uint64(3), m.LatestEpoch)
cp.SortCandidates(candidates, rp.GetEpochHeight(m.LatestEpoch), cp.CryptoSeed)
assert.Equal(t, candidates[:4], m.LatestDelegates)
assert.Equal(t, candidates[1], m.LatestBlockProducer)
}
// E2E RollDPoS tests bellow
type directOverlay struct {
addr net.Addr
peers map[net.Addr]*RollDPoS
}
func (o *directOverlay) Start(_ context.Context) error { return nil }
func (o *directOverlay) Stop(_ context.Context) error { return nil }
func (o *directOverlay) Broadcast(msg proto.Message) error {
// Only broadcast consensus message
if cMsg, ok := msg.(*iotextypes.ConsensusMessage); ok {
for _, r := range o.peers {
if err := r.HandleConsensusMsg(cMsg); err != nil {
return errors.Wrap(err, "error when handling consensus message directly")
}
}
}
return nil
}
func (o *directOverlay) Tell(uint32, net.Addr, proto.Message) error { return nil }
func (o *directOverlay) Self() net.Addr { return o.addr }
func (o *directOverlay) GetPeers() []net.Addr {
addrs := make([]net.Addr, 0, len(o.peers))
for addr := range o.peers {
addrs = append(addrs, addr)
}
return addrs
}
func TestRollDPoSConsensus(t *testing.T) {
newConsensusComponents := func(numNodes int) ([]*RollDPoS, []*directOverlay, []blockchain.Blockchain) {
cfg := config.Default
cfg.Consensus.RollDPoS.Delay = 300 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptBlockTTL = 400 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptProposalEndorsementTTL = 200 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptLockEndorsementTTL = 200 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.CommitTTL = 200 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.UnmatchedEventTTL = time.Second
cfg.Consensus.RollDPoS.FSM.UnmatchedEventInterval = 10 * time.Millisecond
cfg.Consensus.RollDPoS.ToleratedOvertime = 200 * time.Millisecond
cfg.Genesis.BlockInterval = time.Second
cfg.Genesis.Blockchain.NumDelegates = uint64(numNodes)
cfg.Genesis.Blockchain.NumSubEpochs = 1
chainAddrs := make([]*addrKeyPair, 0, numNodes)
networkAddrs := make([]net.Addr, 0, numNodes)
for i := 0; i < numNodes; i++ {
sk := identityset.PrivateKey(i)
addr := addrKeyPair{
encodedAddr: identityset.Address(i).String(),
pubKey: sk.PublicKey(),
priKey: sk,
}
chainAddrs = append(chainAddrs, &addr)
networkAddrs = append(networkAddrs, node.NewTCPNode(fmt.Sprintf("127.0.0.%d:4689", i+1)))
}
chainRawAddrs := make([]string, 0, numNodes)
addressMap := make(map[string]*addrKeyPair)
for _, addr := range chainAddrs {
chainRawAddrs = append(chainRawAddrs, addr.encodedAddr)
addressMap[addr.encodedAddr] = addr
}
cp.SortCandidates(chainRawAddrs, 1, cp.CryptoSeed)
for i, rawAddress := range chainRawAddrs {
chainAddrs[i] = addressMap[rawAddress]
}
candidatesByHeightFunc := func(_ uint64) ([]*state.Candidate, error) {
candidates := make([]*state.Candidate, 0, numNodes)
for _, addr := range chainAddrs {
candidates = append(candidates, &state.Candidate{Address: addr.encodedAddr})
}
return candidates, nil
}
chains := make([]blockchain.Blockchain, 0, numNodes)
p2ps := make([]*directOverlay, 0, numNodes)
cs := make([]*RollDPoS, 0, numNodes)
for i := 0; i < numNodes; i++ {
ctx := context.Background()
cfg.Chain.ProducerPrivKey = hex.EncodeToString(chainAddrs[i].priKey.Bytes())
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
require.NoError(t, err)
require.NoError(t, sf.Start(ctx))
for j := 0; j < numNodes; j++ {
ws, err := sf.NewWorkingSet()
require.NoError(t, err)
_, err = accountutil.LoadOrCreateAccount(ws, chainRawAddrs[j], big.NewInt(0))
require.NoError(t, err)
gasLimit := testutil.TestGasLimit
wsctx := protocol.WithRunActionsCtx(ctx,
protocol.RunActionsCtx{
Producer: testaddress.Addrinfo["producer"],
GasLimit: gasLimit,
})
_, err = ws.RunActions(wsctx, 0, nil)
require.NoError(t, err)
require.NoError(t, sf.Commit(ws))
}
registry := protocol.Registry{}
acc := account.NewProtocol()
require.NoError(t, registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(t, registry.Register(rolldpos.ProtocolID, rp))
chain := blockchain.NewBlockchain(
cfg,
blockchain.InMemDaoOption(),
blockchain.PrecreatedStateFactoryOption(sf),
blockchain.RegistryOption(®istry),
)
require.NoError(t, registry.Register(vote.ProtocolID, vote.NewProtocol(chain)))
chain.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(chain, 0))
chain.Validator().AddActionValidators(account.NewProtocol())
chains = append(chains, chain)
actPool, err := actpool.NewActPool(chain, cfg.ActPool, actpool.EnableExperimentalActions())
require.NoError(t, err)
p2p := &directOverlay{
addr: networkAddrs[i],
peers: make(map[net.Addr]*RollDPoS),
}
p2ps = append(p2ps, p2p)
consensus, err := NewRollDPoSBuilder().
SetAddr(chainAddrs[i].encodedAddr).
SetPriKey(chainAddrs[i].priKey).
SetConfig(cfg).
SetBlockchain(chain).
SetActPool(actPool).
SetBroadcast(p2p.Broadcast).
SetCandidatesByHeightFunc(candidatesByHeightFunc).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
cs = append(cs, consensus)
}
for i := 0; i < numNodes; i++ {
for j := 0; j < numNodes; j++ {
if i != j {
p2ps[i].peers[p2ps[j].addr] = cs[j]
}
}
}
return cs, p2ps, chains
}
t.Run("1-block", func(t *testing.T) {
// TODO: fix and enable the test
t.Skip()
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 10*time.Second, func() (bool, error) {
for _, chain := range chains {
if chain.TipHeight() < 1 {
return false, nil
}
}
return true, nil
}))
})
t.Run("1-epoch", func(t *testing.T) {
if testing.Short() {
t.Skip("Skip the 1-epoch test in short mode.")
}
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 60*time.Second, func() (bool, error) {
for _, chain := range chains {
if chain.TipHeight() < 48 {
return false, nil
}
}
return true, nil
}))
})
t.Run("network-partition-time-rotation", func(t *testing.T) {
// TODO: fix and enable the test
t.Skip()
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 1 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[1].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
cs[idx].ctx.roundCalc.timeBasedRotation = true
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 60*time.Second, func() (bool, error) {
for i, chain := range chains {
if i == 1 {
continue
}
if chain.TipHeight() < 4 {
return false, nil
}
}
return true, nil
}))
})
t.Run("proposer-network-partition-blocking", func(t *testing.T) {
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 1 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[1].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
time.Sleep(5 * time.Second)
for _, chain := range chains {
header, err := chain.BlockHeaderByHeight(1)
assert.Nil(t, header)
assert.Error(t, err)
}
})
t.Run("non-proposer-network-partition-blocking", func(t *testing.T) {
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 0 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[0].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
time.Sleep(5 * time.Second)
for i, chain := range chains {
header, err := chain.BlockHeaderByHeight(1)
if i == 0 {
assert.Nil(t, header)
assert.Error(t, err)
} else {
assert.NotNil(t, header)
assert.NoError(t, err)
}
}
})
}
| 1 | 17,593 | File is not `goimports`-ed (from `goimports`) | iotexproject-iotex-core | go |
@@ -104,7 +104,6 @@ public class TableMigrationUtil {
.withMetrics(metrics)
.withPartitionPath(partitionKey)
.build();
-
}).collect(Collectors.toList());
} catch (IOException e) {
throw new RuntimeException("Unable to list files in partition: " + partitionUri, e);
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.data;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.MetricsConfig;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.hadoop.HadoopInputFile;
import org.apache.iceberg.mapping.NameMapping;
import org.apache.iceberg.orc.OrcMetrics;
import org.apache.iceberg.parquet.ParquetUtil;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
public class TableMigrationUtil {
private static final PathFilter HIDDEN_PATH_FILTER =
p -> !p.getName().startsWith("_") && !p.getName().startsWith(".");
private TableMigrationUtil() {
}
/**
* Returns the data files in a partition by listing the partition location.
* <p>
* For Parquet and ORC partitions, this will read metrics from the file footer. For Avro partitions,
* metrics are set to null.
* <p>
* Note: certain metrics, like NaN counts, that are only supported by iceberg file writers but not file footers,
* will not be populated.
*
* @param partition partition key, e.g., "a=1/b=2"
* @param uri partition location URI
* @param format partition format, avro, parquet or orc
* @param spec a partition spec
* @param conf a Hadoop conf
* @param metricsConfig a metrics conf
* @param mapping a name mapping
* @return a List of DataFile
*/
public static List<DataFile> listPartition(Map<String, String> partition, String uri, String format,
PartitionSpec spec, Configuration conf, MetricsConfig metricsConfig,
NameMapping mapping) {
if (format.contains("avro")) {
return listAvroPartition(partition, uri, spec, conf);
} else if (format.contains("parquet")) {
return listParquetPartition(partition, uri, spec, conf, metricsConfig, mapping);
} else if (format.contains("orc")) {
return listOrcPartition(partition, uri, spec, conf, metricsConfig, mapping);
} else {
throw new UnsupportedOperationException("Unknown partition format: " + format);
}
}
private static List<DataFile> listAvroPartition(Map<String, String> partitionPath, String partitionUri,
PartitionSpec spec, Configuration conf) {
try {
Path partition = new Path(partitionUri);
FileSystem fs = partition.getFileSystem(conf);
return Arrays.stream(fs.listStatus(partition, HIDDEN_PATH_FILTER))
.filter(FileStatus::isFile)
.map(stat -> {
Metrics metrics = new Metrics(-1L, null, null, null);
String partitionKey = spec.fields().stream()
.map(PartitionField::name)
.map(name -> String.format("%s=%s", name, partitionPath.get(name)))
.collect(Collectors.joining("/"));
return DataFiles.builder(spec)
.withPath(stat.getPath().toString())
.withFormat("avro")
.withFileSizeInBytes(stat.getLen())
.withMetrics(metrics)
.withPartitionPath(partitionKey)
.build();
}).collect(Collectors.toList());
} catch (IOException e) {
throw new RuntimeException("Unable to list files in partition: " + partitionUri, e);
}
}
private static List<DataFile> listParquetPartition(Map<String, String> partitionPath, String partitionUri,
PartitionSpec spec, Configuration conf,
MetricsConfig metricsSpec, NameMapping mapping) {
try {
Path partition = new Path(partitionUri);
FileSystem fs = partition.getFileSystem(conf);
return Arrays.stream(fs.listStatus(partition, HIDDEN_PATH_FILTER))
.filter(FileStatus::isFile)
.map(stat -> {
Metrics metrics;
try {
ParquetMetadata metadata = ParquetFileReader.readFooter(conf, stat);
metrics = ParquetUtil.footerMetrics(metadata, Stream.empty(), metricsSpec, mapping);
} catch (IOException e) {
throw new RuntimeException("Unable to read the footer of the parquet file: " +
stat.getPath(), e);
}
String partitionKey = spec.fields().stream()
.map(PartitionField::name)
.map(name -> String.format("%s=%s", name, partitionPath.get(name)))
.collect(Collectors.joining("/"));
return DataFiles.builder(spec)
.withPath(stat.getPath().toString())
.withFormat("parquet")
.withFileSizeInBytes(stat.getLen())
.withMetrics(metrics)
.withPartitionPath(partitionKey)
.build();
}).collect(Collectors.toList());
} catch (IOException e) {
throw new RuntimeException("Unable to list files in partition: " + partitionUri, e);
}
}
private static List<DataFile> listOrcPartition(Map<String, String> partitionPath, String partitionUri,
PartitionSpec spec, Configuration conf,
MetricsConfig metricsSpec, NameMapping mapping) {
try {
Path partition = new Path(partitionUri);
FileSystem fs = partition.getFileSystem(conf);
return Arrays.stream(fs.listStatus(partition, HIDDEN_PATH_FILTER))
.filter(FileStatus::isFile)
.map(stat -> {
Metrics metrics = OrcMetrics.fromInputFile(HadoopInputFile.fromPath(stat.getPath(), conf),
metricsSpec, mapping);
String partitionKey = spec.fields().stream()
.map(PartitionField::name)
.map(name -> String.format("%s=%s", name, partitionPath.get(name)))
.collect(Collectors.joining("/"));
return DataFiles.builder(spec)
.withPath(stat.getPath().toString())
.withFormat("orc")
.withFileSizeInBytes(stat.getLen())
.withMetrics(metrics)
.withPartitionPath(partitionKey)
.build();
}).collect(Collectors.toList());
} catch (IOException e) {
throw new RuntimeException("Unable to list files in partition: " + partitionUri, e);
}
}
}
| 1 | 43,289 | I was saving this white-space for my retirement :nit: | apache-iceberg | java |
@@ -133,6 +133,10 @@ module Blacklight::Controller
def discard_flash_if_xhr
flash.discard if request.xhr?
end
+ deprecation_deprecate discard_flash_if_xhr: "Discarding flash messages on XHR requests is deprecated.
+ If you wish to continue this behavior, add this method to your ApplicationController with an
+ after_action :discard_flash_if_xhr filter. To disable this behavior now and remove this warning, add
+ a skip_after_action :discard_flash_if_xhr to your ApplicationController."
##
# | 1 | # frozen_string_literal: true
# Filters added to this controller apply to all controllers in the hosting application
# as this module is mixed-in to the application controller in the hosting app on installation.
module Blacklight::Controller
extend ActiveSupport::Concern
extend Deprecation
self.deprecation_horizon = 'blacklight 7.x'
included do
include Blacklight::SearchFields
helper Blacklight::SearchFields
include ActiveSupport::Callbacks
# now in application.rb file under config.filter_parameters
# filter_parameter_logging :password, :password_confirmation
helper_method :current_user_session, :current_user, :current_or_guest_user
after_action :discard_flash_if_xhr
# handle basic authorization exception with #access_denied
rescue_from Blacklight::Exceptions::AccessDenied, :with => :access_denied
# extra head content
helper_method :has_user_authentication_provider?
helper_method :blacklight_config, :blacklight_configuration_context
helper_method :search_action_url, :search_action_path, :search_facet_url, :search_facet_path
helper_method :search_state
# Specify which class to use for the search state. You can subclass SearchState if you
# want to override any of the methods (e.g. SearchState#url_for_document)
class_attribute :search_state_class
self.search_state_class = Blacklight::SearchState
# This callback runs when a user first logs in
define_callbacks :logging_in_user
set_callback :logging_in_user, :before, :transfer_guest_user_actions_to_current_user
end
def default_catalog_controller
CatalogController
end
delegate :blacklight_config, to: :default_catalog_controller
protected
##
# Context in which to evaluate blacklight configuration conditionals
def blacklight_configuration_context
@blacklight_configuration_context ||= Blacklight::Configuration::Context.new(self)
end
##
# Determine whether to render the bookmarks control
# (Needs to be available globally, as it is used in the navbar)
def render_bookmarks_control?
has_user_authentication_provider? and current_or_guest_user.present?
end
##
# Determine whether to render the saved searches link
# (Needs to be available globally, as it is used in the navbar)
def render_saved_searches?
has_user_authentication_provider? and current_user
end
# @return [Blacklight::SearchState] a memoized instance of the parameter state.
def search_state
@search_state ||= begin
if search_state_class.instance_method(:initialize).arity == -3
search_state_class.new(params, blacklight_config, self)
else
Deprecation.warn(search_state_class, "The constructor for #{search_state_class} now requires a third argument. " \
"Invoking it will 2 arguments is deprecated and will be removed in Blacklight 7.")
search_state_class.new(params, blacklight_config)
end
end
end
# Default route to the search action (used e.g. in global partials). Override this method
# in a controller or in your ApplicationController to introduce custom logic for choosing
# which action the search form should use
def search_action_url options = {}
# Rails 4.2 deprecated url helpers accepting string keys for 'controller' or 'action'
search_catalog_url(options.except(:controller, :action))
end
def search_action_path *args
if args.first.is_a? Hash
args.first[:only_path] = true
end
search_action_url(*args)
end
def search_facet_url options = {}
opts = search_state.to_h.merge(action: "facet").merge(options).except(:page)
url_for opts
end
deprecation_deprecate search_facet_url: 'Use search_facet_path instead.'
def search_facet_path(options = {})
Deprecation.silence(Blacklight::Controller) do
search_facet_url(options.merge(only_path: true))
end
end
# Returns a list of Searches from the ids in the user's history.
def searches_from_history
session[:history].blank? ? Search.none : Search.where(:id => session[:history]).order("updated_at desc")
end
# Should be provided by authentication provider
# def current_user
# end
# def current_or_guest_user
# end
# Here's a stub implementation we'll add if it isn't provided for us
def current_or_guest_user
if defined? super
super
elsif has_user_authentication_provider?
current_user
end
end
alias blacklight_current_or_guest_user current_or_guest_user
##
# We discard flash messages generated by the xhr requests to avoid
# confusing UX.
def discard_flash_if_xhr
flash.discard if request.xhr?
end
##
#
#
def has_user_authentication_provider?
respond_to? :current_user
end
def require_user_authentication_provider
raise ActionController::RoutingError, 'Not Found' unless has_user_authentication_provider?
end
##
# When a user logs in, transfer any saved searches or bookmarks to the current_user
def transfer_guest_user_actions_to_current_user
return unless respond_to? :current_user and respond_to? :guest_user and current_user and guest_user
current_user_searches = current_user.searches.pluck(:query_params)
current_user_bookmarks = current_user.bookmarks.pluck(:document_id)
guest_user.searches.reject { |s| current_user_searches.include?(s.query_params)}.each do |s|
current_user.searches << s
s.save!
end
guest_user.bookmarks.reject { |b| current_user_bookmarks.include?(b.document_id)}.each do |b|
current_user.bookmarks << b
b.save!
end
# let guest_user know we've moved some bookmarks from under it
guest_user.reload if guest_user.persisted?
end
##
# To handle failed authorization attempts, redirect the user to the
# login form and persist the current request uri as a parameter
def access_denied
# send the user home if the access was previously denied by the same
# request to avoid sending the user back to the login page
# (e.g. protected page -> logout -> returned to protected page -> home)
redirect_to root_url and flash.discard and return if request.referer and request.referer.ends_with? request.fullpath
redirect_to root_url and return unless has_user_authentication_provider?
redirect_to new_user_session_url(:referer => request.fullpath)
end
end
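# --- Illustrative sketch only; not part of Blacklight::Controller ---
# The review below asks how a host application silences the deprecation when it does NOT want the
# XHR flash-discarding behavior. A hypothetical host-app ApplicationController could opt out (or
# explicitly opt back in) like this; the class below is an assumption for illustration, not library code.
class ApplicationController < ActionController::Base
  include Blacklight::Controller

  # Drop the deprecated behavior now and silence the warning:
  skip_after_action :discard_flash_if_xhr

  # Or, to keep discarding flash messages on XHR requests, re-add the filter instead:
  # after_action :discard_flash_if_xhr
end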
| 1 | 7,952 | What if you don't want to continue with the behavior, but you just want the deprecation to go away? Should we add `skip_after_action :discard_flash_if_xhr`? | projectblacklight-blacklight | rb |
@@ -309,7 +309,7 @@ func (ws *workingSet) validateNonce(blk *block.Block) error {
}
// Verify each account's Nonce
for srcAddr, receivedNonces := range accountNonceMap {
- confirmedState, err := accountutil.AccountState(ws, srcAddr)
+ confirmedState, err := accountutil.AccountStateByHash160(ws, srcAddr)
if err != nil {
return errors.Wrapf(err, "failed to get the confirmed nonce of address %s", srcAddr)
} | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package factory
import (
"context"
"sort"
"github.com/iotexproject/go-pkgs/hash"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/actpool/actioniterator"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/state"
)
var (
stateDBMtc = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "iotex_state_db",
Help: "IoTeX State DB",
},
[]string{"type"},
)
)
func init() {
prometheus.MustRegister(stateDBMtc)
}
type (
workingSet struct {
height uint64
finalized bool
dock protocol.Dock
receipts []*action.Receipt
commitFunc func(uint64) error
readviewFunc func(name string) (interface{}, error)
writeviewFunc func(name string, v interface{}) error
dbFunc func() db.KVStore
delStateFunc func(string, []byte) error
statesFunc func(opts ...protocol.StateOption) (uint64, state.Iterator, error)
digestFunc func() hash.Hash256
finalizeFunc func(uint64) error
getStateFunc func(string, []byte, interface{}) error
putStateFunc func(string, []byte, interface{}) error
revertFunc func(int) error
snapshotFunc func() int
}
workingSetCreator interface {
newWorkingSet(context.Context, uint64) (*workingSet, error)
}
)
func (ws *workingSet) digest() (hash.Hash256, error) {
if !ws.finalized {
return hash.ZeroHash256, errors.New("workingset has not been finalized yet")
}
return ws.digestFunc(), nil
}
func (ws *workingSet) Receipts() ([]*action.Receipt, error) {
if !ws.finalized {
return nil, errors.New("workingset has not been finalized yet")
}
return ws.receipts, nil
}
// Height returns the Height of the block being worked on
func (ws *workingSet) Height() (uint64, error) {
return ws.height, nil
}
func (ws *workingSet) validate(ctx context.Context) error {
if ws.finalized {
return errors.Errorf("cannot run action on a finalized working set")
}
blkCtx := protocol.MustGetBlockCtx(ctx)
if blkCtx.BlockHeight != ws.height {
return errors.Errorf(
"invalid block height %d, %d expected",
blkCtx.BlockHeight,
ws.height,
)
}
return nil
}
func (ws *workingSet) runActions(
ctx context.Context,
elps []action.SealedEnvelope,
) ([]*action.Receipt, error) {
if err := ws.validate(ctx); err != nil {
return nil, err
}
// Handle actions
receipts := make([]*action.Receipt, 0)
for _, elp := range elps {
ctx, err := withActionCtx(ctx, elp)
if err != nil {
return nil, err
}
receipt, err := ws.runAction(ctx, elp)
if err != nil {
return nil, errors.Wrap(err, "error when run action")
}
if receipt != nil {
receipts = append(receipts, receipt)
}
}
return receipts, nil
}
func withActionCtx(ctx context.Context, selp action.SealedEnvelope) (context.Context, error) {
var actionCtx protocol.ActionCtx
var err error
caller := selp.SrcPubkey().Address()
if caller == nil {
return nil, errors.New("failed to get address")
}
actionCtx.Caller = caller
actionCtx.ActionHash, err = selp.Hash()
if err != nil {
return nil, err
}
actionCtx.GasPrice = selp.GasPrice()
intrinsicGas, err := selp.IntrinsicGas()
if err != nil {
return nil, err
}
actionCtx.IntrinsicGas = intrinsicGas
actionCtx.Nonce = selp.Nonce()
return protocol.WithActionCtx(ctx, actionCtx), nil
}
func (ws *workingSet) runAction(
ctx context.Context,
elp action.SealedEnvelope,
) (*action.Receipt, error) {
if protocol.MustGetBlockCtx(ctx).GasLimit < protocol.MustGetActionCtx(ctx).IntrinsicGas {
return nil, errors.Wrap(action.ErrHitGasLimit, "block gas limit exceeded")
}
// Handle action
reg, ok := protocol.GetRegistry(ctx)
if !ok {
return nil, nil
}
for _, actionHandler := range reg.All() {
receipt, err := actionHandler.Handle(ctx, elp.Action(), ws)
elpHash, err1 := elp.Hash()
if err1 != nil {
return nil, errors.Wrapf(err1, "Failed to get hash")
}
if err != nil {
return nil, errors.Wrapf(
err,
"error when action %x mutates states",
elpHash,
)
}
if receipt != nil {
return receipt, nil
}
}
// TODO (zhi): return error
return nil, nil
}
func (ws *workingSet) finalize() error {
if ws.finalized {
return errors.New("Cannot finalize a working set twice")
}
if err := ws.finalizeFunc(ws.height); err != nil {
return err
}
ws.finalized = true
return nil
}
func (ws *workingSet) Snapshot() int {
return ws.snapshotFunc()
}
func (ws *workingSet) Revert(snapshot int) error {
return ws.revertFunc(snapshot)
}
// Commit persists all changes in RunActions() into the DB
func (ws *workingSet) Commit(ctx context.Context) error {
if err := ws.commitFunc(ws.height); err != nil {
return err
}
if err := protocolCommit(ctx, ws); err != nil {
return err
}
ws.Reset()
return nil
}
// GetDB returns the underlying DB for account/contract storage
func (ws *workingSet) GetDB() db.KVStore {
return ws.dbFunc()
}
// State pulls a state from DB
func (ws *workingSet) State(s interface{}, opts ...protocol.StateOption) (uint64, error) {
stateDBMtc.WithLabelValues("get").Inc()
cfg, err := processOptions(opts...)
if err != nil {
return ws.height, err
}
return ws.height, ws.getStateFunc(cfg.Namespace, cfg.Key, s)
}
func (ws *workingSet) States(opts ...protocol.StateOption) (uint64, state.Iterator, error) {
return ws.statesFunc(opts...)
}
// PutState puts a state into DB
func (ws *workingSet) PutState(s interface{}, opts ...protocol.StateOption) (uint64, error) {
stateDBMtc.WithLabelValues("put").Inc()
cfg, err := processOptions(opts...)
if err != nil {
return ws.height, err
}
return ws.height, ws.putStateFunc(cfg.Namespace, cfg.Key, s)
}
// DelState deletes a state from DB
func (ws *workingSet) DelState(opts ...protocol.StateOption) (uint64, error) {
stateDBMtc.WithLabelValues("delete").Inc()
cfg, err := processOptions(opts...)
if err != nil {
return ws.height, err
}
return ws.height, ws.delStateFunc(cfg.Namespace, cfg.Key)
}
// ReadView reads the view
func (ws *workingSet) ReadView(name string) (interface{}, error) {
return ws.readviewFunc(name)
}
// WriteView writeback the view to factory
func (ws *workingSet) WriteView(name string, v interface{}) error {
return ws.writeviewFunc(name, v)
}
func (ws *workingSet) ProtocolDirty(name string) bool {
return ws.dock.ProtocolDirty(name)
}
func (ws *workingSet) Load(name, key string, v interface{}) error {
return ws.dock.Load(name, key, v)
}
func (ws *workingSet) Unload(name, key string, v interface{}) error {
return ws.dock.Unload(name, key, v)
}
func (ws *workingSet) Reset() {
ws.dock.Reset()
}
// createGenesisStates initialize the genesis states
func (ws *workingSet) CreateGenesisStates(ctx context.Context) error {
if reg, ok := protocol.GetRegistry(ctx); ok {
for _, p := range reg.All() {
if gsc, ok := p.(protocol.GenesisStateCreator); ok {
if err := gsc.CreateGenesisStates(ctx, ws); err != nil {
return errors.Wrap(err, "failed to create genesis states for protocol")
}
}
}
}
return ws.finalize()
}
func (ws *workingSet) validateNonce(blk *block.Block) error {
accountNonceMap := make(map[string][]uint64)
for _, selp := range blk.Actions {
caller := selp.SrcPubkey().Address()
if caller == nil {
return errors.New("failed to get address")
}
appendActionIndex(accountNonceMap, caller.String(), selp.Nonce())
}
// Special handling for genesis block
if blk.Height() == 0 {
return nil
}
// Verify each account's Nonce
for srcAddr, receivedNonces := range accountNonceMap {
confirmedState, err := accountutil.AccountState(ws, srcAddr)
if err != nil {
return errors.Wrapf(err, "failed to get the confirmed nonce of address %s", srcAddr)
}
receivedNonces := receivedNonces
sort.Slice(receivedNonces, func(i, j int) bool { return receivedNonces[i] < receivedNonces[j] })
for i, nonce := range receivedNonces {
if nonce != confirmedState.Nonce+uint64(i+1) {
return errors.Wrapf(
action.ErrNonce,
"the %d nonce %d of address %s (confirmed nonce %d) is not continuously increasing",
i,
nonce,
srcAddr,
confirmedState.Nonce,
)
}
}
}
return nil
}
func (ws *workingSet) Process(ctx context.Context, actions []action.SealedEnvelope) error {
return ws.process(ctx, actions)
}
func (ws *workingSet) process(ctx context.Context, actions []action.SealedEnvelope) error {
var err error
reg := protocol.MustGetRegistry(ctx)
for _, act := range actions {
if ctx, err = withActionCtx(ctx, act); err != nil {
return err
}
for _, p := range reg.All() {
if validator, ok := p.(protocol.ActionValidator); ok {
if err := validator.Validate(ctx, act.Action(), ws); err != nil {
return err
}
}
}
}
for _, p := range protocol.MustGetRegistry(ctx).All() {
if pp, ok := p.(protocol.PreStatesCreator); ok {
if err := pp.CreatePreStates(ctx, ws); err != nil {
return err
}
}
}
	// TODO: verify whether the post system actions are appended at the tail
receipts, err := ws.runActions(ctx, actions)
if err != nil {
return err
}
ws.receipts = receipts
return ws.finalize()
}
func (ws *workingSet) pickAndRunActions(
ctx context.Context,
ap actpool.ActPool,
postSystemActions []action.SealedEnvelope,
allowedBlockGasResidue uint64,
) ([]action.SealedEnvelope, error) {
err := ws.validate(ctx)
if err != nil {
return nil, err
}
receipts := make([]*action.Receipt, 0)
executedActions := make([]action.SealedEnvelope, 0)
reg := protocol.MustGetRegistry(ctx)
for _, p := range reg.All() {
if pp, ok := p.(protocol.PreStatesCreator); ok {
if err := pp.CreatePreStates(ctx, ws); err != nil {
return nil, err
}
}
}
// initial action iterator
blkCtx := protocol.MustGetBlockCtx(ctx)
if ap != nil {
actionIterator := actioniterator.NewActionIterator(ap.PendingActionMap())
for {
nextAction, ok := actionIterator.Next()
if !ok {
break
}
if nextAction.GasLimit() > blkCtx.GasLimit {
actionIterator.PopAccount()
continue
}
if ctx, err = withActionCtx(ctx, nextAction); err == nil {
for _, p := range reg.All() {
if validator, ok := p.(protocol.ActionValidator); ok {
if err = validator.Validate(ctx, nextAction.Action(), ws); err != nil {
break
}
}
}
}
if err != nil {
caller := nextAction.SrcPubkey().Address()
if caller == nil {
return nil, errors.New("failed to get address")
}
ap.DeleteAction(caller)
actionIterator.PopAccount()
continue
}
receipt, err := ws.runAction(ctx, nextAction)
switch errors.Cause(err) {
case nil:
// do nothing
case action.ErrHitGasLimit:
actionIterator.PopAccount()
continue
default:
				nextActionHash, hashErr := nextAction.Hash()
				if hashErr != nil {
					return nil, errors.Wrap(hashErr, "Failed to get hash of the next action")
				}
				// Use a separate hashErr above so the runAction error is not shadowed here.
				return nil, errors.Wrapf(err, "Failed to update state changes for selp %x", nextActionHash)
}
if receipt != nil {
blkCtx.GasLimit -= receipt.GasConsumed
ctx = protocol.WithBlockCtx(ctx, blkCtx)
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, nextAction)
			// To avoid looping over all actions in act_pool, we stop processing actions when the
			// remaining gas is below a certain threshold
if blkCtx.GasLimit < allowedBlockGasResidue {
break
}
}
}
for _, selp := range postSystemActions {
if ctx, err = withActionCtx(ctx, selp); err != nil {
return nil, err
}
receipt, err := ws.runAction(ctx, selp)
if err != nil {
return nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, selp)
}
ws.receipts = receipts
return executedActions, ws.finalize()
}
func (ws *workingSet) ValidateBlock(ctx context.Context, blk *block.Block) error {
if err := ws.validateNonce(blk); err != nil {
return errors.Wrap(err, "failed to validate nonce")
}
if err := ws.process(ctx, blk.RunnableActions().Actions()); err != nil {
log.L().Error("Failed to update state.", zap.Uint64("height", ws.height), zap.Error(err))
return err
}
digest, err := ws.digest()
if err != nil {
return err
}
if err = blk.VerifyDeltaStateDigest(digest); err != nil {
return errors.Wrap(err, "failed to verify delta state digest")
}
if err = blk.VerifyReceiptRoot(calculateReceiptRoot(ws.receipts)); err != nil {
return errors.Wrap(err, "Failed to verify receipt root")
}
return nil
}
func (ws *workingSet) CreateBuilder(
ctx context.Context,
ap actpool.ActPool,
postSystemActions []action.SealedEnvelope,
allowedBlockGasResidue uint64,
) (*block.Builder, error) {
actions, err := ws.pickAndRunActions(ctx, ap, postSystemActions, allowedBlockGasResidue)
if err != nil {
return nil, err
}
ra := block.NewRunnableActionsBuilder().
AddActions(actions...).
Build()
blkCtx := protocol.MustGetBlockCtx(ctx)
bcCtx := protocol.MustGetBlockchainCtx(ctx)
prevBlkHash := bcCtx.Tip.Hash
digest, err := ws.digest()
if err != nil {
return nil, errors.Wrap(err, "failed to get digest")
}
blkBuilder := block.NewBuilder(ra).
SetHeight(blkCtx.BlockHeight).
SetTimestamp(blkCtx.BlockTimeStamp).
SetPrevBlockHash(prevBlkHash).
SetDeltaStateDigest(digest).
SetReceipts(ws.receipts).
SetReceiptRoot(calculateReceiptRoot(ws.receipts)).
SetLogsBloom(calculateLogsBloom(ctx, ws.receipts))
return blkBuilder, nil
}
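// validateNonceByAddr is an illustrative sketch only (not part of the original file). It shows the
// reviewer's suggestion of keying accountNonceMap by address.Address instead of by string. It
// assumes an extra import of "github.com/iotexproject/iotex-address/address"; the function name
// and that import path are assumptions for illustration.
func (ws *workingSet) validateNonceByAddr(blk *block.Block) error {
	accountNonceMap := make(map[address.Address][]uint64)
	for _, selp := range blk.Actions {
		caller := selp.SrcPubkey().Address()
		if caller == nil {
			return errors.New("failed to get address")
		}
		// The concrete address type is comparable, so it can be used directly as a map key.
		accountNonceMap[caller] = append(accountNonceMap[caller], selp.Nonce())
	}
	// Special handling for genesis block
	if blk.Height() == 0 {
		return nil
	}
	for srcAddr, receivedNonces := range accountNonceMap {
		confirmedState, err := accountutil.AccountState(ws, srcAddr.String())
		if err != nil {
			return errors.Wrapf(err, "failed to get the confirmed nonce of address %s", srcAddr.String())
		}
		sort.Slice(receivedNonces, func(i, j int) bool { return receivedNonces[i] < receivedNonces[j] })
		for i, nonce := range receivedNonces {
			if nonce != confirmedState.Nonce+uint64(i+1) {
				return errors.Wrapf(action.ErrNonce,
					"the %d nonce %d of address %s (confirmed nonce %d) is not continuously increasing",
					i, nonce, srcAddr.String(), confirmedState.Nonce)
			}
		}
	}
	return nil
}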
| 1 | 23,691 | change `accountNonceMap` to map[address.Address][]uint64 | iotexproject-iotex-core | go |
@@ -38,6 +38,7 @@ public class GapicCodeGeneratorTest extends GapicTestBase2 {
.put(MainGapicProviderFactory.NODEJS, MainGapicProviderFactory.NODEJS)
.put(MainGapicProviderFactory.NODEJS_DOC, MainGapicProviderFactory.NODEJS)
.put(MainGapicProviderFactory.CSHARP, MainGapicProviderFactory.CSHARP)
+ .put(MainGapicProviderFactory.CLIENT_CONFIG, "clientconfig")
.build();
private final String apiName; | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen;
import com.google.api.codegen.gapic.MainGapicProviderFactory;
import com.google.common.collect.ImmutableMultimap;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
/** Go code generator baseline tests. */
@RunWith(Parameterized.class)
public class GapicCodeGeneratorTest extends GapicTestBase2 {
private static final ImmutableMultimap<String, String> TEST_DIR =
ImmutableMultimap.<String, String>builder()
.put(MainGapicProviderFactory.GO, MainGapicProviderFactory.GO)
.put(MainGapicProviderFactory.PHP, MainGapicProviderFactory.PHP)
.put(MainGapicProviderFactory.JAVA, MainGapicProviderFactory.JAVA)
.put(MainGapicProviderFactory.RUBY, MainGapicProviderFactory.RUBY)
.put(MainGapicProviderFactory.RUBY_DOC, MainGapicProviderFactory.RUBY)
.put(MainGapicProviderFactory.PYTHON, MainGapicProviderFactory.PYTHON)
.put(MainGapicProviderFactory.NODEJS, MainGapicProviderFactory.NODEJS)
.put(MainGapicProviderFactory.NODEJS_DOC, MainGapicProviderFactory.NODEJS)
.put(MainGapicProviderFactory.CSHARP, MainGapicProviderFactory.CSHARP)
.build();
private final String apiName;
public GapicCodeGeneratorTest(
String idForFactory,
String[] gapicConfigFileNames,
String packageConfigFileName,
List<String> snippetName,
String apiName,
String baseline) {
super(idForFactory, gapicConfigFileNames, packageConfigFileName, snippetName, baseline);
this.apiName = apiName;
for (String dir : TEST_DIR.get(idForFactory)) {
getTestDataLocator().addTestDataSource(getClass(), dir);
getTestDataLocator().addTestDataSource(getClass(), "testdata/" + dir);
}
}
@Parameters(name = "{0}")
public static List<Object[]> testedConfigs() {
return Arrays.asList(
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.GO,
new String[] {"go_gapic.yaml", "library_gapic.yaml"},
null,
"library"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.PHP,
new String[] {"php_gapic.yaml", "library_gapic.yaml"},
"library_pkg.yaml",
"library"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.JAVA,
new String[] {"java_gapic.yaml", "library_gapic.yaml"},
"library_pkg.yaml",
"library"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.JAVA,
new String[] {"java_gapic.yaml", "no_path_templates_gapic.yaml"},
"no_path_templates_pkg.yaml",
"no_path_templates"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.RUBY,
new String[] {"ruby_gapic.yaml", "library_gapic.yaml"},
"library_pkg.yaml",
"library"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.RUBY_DOC,
new String[] {"ruby_gapic.yaml", "library_gapic.yaml"},
"library_pkg.yaml",
"library"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.RUBY,
new String[] {"ruby_gapic.yaml", "multiple_services_gapic.yaml"},
"multiple_services_pkg.yaml",
"multiple_services"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.RUBY,
new String[] {"ruby_gapic.yaml", "longrunning_gapic.yaml"},
"longrunning_pkg.yaml",
"longrunning"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.PYTHON,
new String[] {"python_gapic.yaml", "library_gapic.yaml"},
"library_pkg.yaml",
"library"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.PYTHON,
new String[] {"python_gapic.yaml", "no_path_templates_gapic.yaml"},
"no_path_templates_pkg.yaml",
"no_path_templates"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.NODEJS,
new String[] {"nodejs_gapic.yaml", "library_gapic.yaml"},
"library_pkg.yaml",
"library"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.NODEJS_DOC,
new String[] {"nodejs_gapic.yaml", "library_gapic.yaml"},
"library_pkg.yaml",
"library"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.NODEJS,
new String[] {"nodejs_gapic.yaml", "no_path_templates_gapic.yaml"},
"library_pkg.yaml",
"no_path_templates"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.NODEJS,
new String[] {"nodejs_gapic.yaml", "multiple_services_gapic.yaml"},
"multiple_services_pkg.yaml",
"multiple_services"),
GapicTestBase2.createTestConfig(
MainGapicProviderFactory.CSHARP,
new String[] {"csharp_gapic.yaml", "library_gapic.yaml"},
"library_pkg.yaml",
"library"));
}
// Tests
// =====
@Test
public void library() throws Exception {
test(apiName);
}
}
| 1 | 23,910 | The `CLIENT_CONFIG` is "client_config" with underscore, so we can't reuse it in the second arg. | googleapis-gapic-generator | java |
@@ -62,6 +62,19 @@ func serviceLoggedIn(ctx context.Context, config Config, name string,
}
}
+ if adminFeatureList[session.UID] {
+ log.CDebugf(ctx, "Enabling a dir op batch size of %d",
+ bgFlushDirOpBatchSizeDefault)
+ // NOTE: This overrides any command-line parameter. It only
+ // matters until we un-feature-flag this, I think it's ok for
+ // now.
+ config.SetBGFlushDirOpBatchSize(bgFlushDirOpBatchSizeDefault)
+ } else {
+ // TODO: let non-admins have a non-1 batch size once admins
+ // test it enough.
+ config.SetBGFlushDirOpBatchSize(1)
+ }
+
config.MDServer().RefreshAuthToken(ctx)
config.BlockServer().RefreshAuthToken(ctx)
config.KBFSOps().RefreshCachedFavorites(ctx) | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"github.com/keybase/client/go/protocol/keybase1"
"golang.org/x/net/context"
)
// TODO: Add a server endpoint to get this data.
var adminFeatureList = map[keybase1.UID]bool{
"23260c2ce19420f97b58d7d95b68ca00": true, // Chris Coyne "chris"
"dbb165b7879fe7b1174df73bed0b9500": true, // Max Krohn, "max"
"ef2e49961eddaa77094b45ed635cfc00": true, // Jeremy Stribling, "strib"
"41b1f75fb55046d370608425a3208100": true, // Jack O'Connor, "oconnor663"
"9403ede05906b942fd7361f40a679500": true, // Jinyang Li, "jinyang"
"b7c2eaddcced7727bcb229751d91e800": true, // Gabriel Handford, "gabrielh"
"1563ec26dc20fd162a4f783551141200": true, // Patrick Crosby, "patrick"
"ebbe1d99410ab70123262cf8dfc87900": true, // Fred Akalin, "akalin"
"8bc0fd2f5fefd30d3ec04452600f4300": true, // Andy Alness, "alness"
"e0b4166c9c839275cf5633ff65c3e819": true, // Chris Nojima, "chrisnojima"
"d95f137b3b4a3600bc9e39350adba819": true, // Cécile Boucheron, "cecileb"
"4c230ae8d2f922dc2ccc1d2f94890700": true, // Marco Polo, "marcopolo"
"237e85db5d939fbd4b84999331638200": true, // Chris Ball, "cjb"
"69da56f622a2ac750b8e590c3658a700": true, // John Zila, "jzila"
"673a740cd20fb4bd348738b16d228219": true, // Steve Sanders, "zanderz"
"95e88f2087e480cae28f08d81554bc00": true, // Mike Maxim, "mikem"
"5c2ef2d4eddd2381daa681ac1a901519": true, // Max Goodman, "chromakode"
"08abe80bd2da8984534b2d8f7b12c700": true, // Song Gao, "songgao"
"eb08cb06e608ea41bd893946445d7919": true, // Miles Steele, "mlsteele"
}
// serviceLoggedIn should be called when a new user logs in. It
// shouldn't be called again until after serviceLoggedOut is called.
func serviceLoggedIn(ctx context.Context, config Config, name string,
bws TLFJournalBackgroundWorkStatus) {
log := config.MakeLogger("")
const sessionID = 0
session, err := config.KeybaseService().CurrentSession(ctx, sessionID)
if err != nil {
log.CDebugf(ctx, "Getting current session failed when %s is logged in, so pretending user has logged out: %v",
name, err)
serviceLoggedOut(ctx, config)
return
}
if jServer, err := GetJournalServer(config); err == nil {
err := jServer.EnableExistingJournals(
ctx, session.UID, session.VerifyingKey, bws)
if err != nil {
log.CWarningf(ctx,
"Failed to enable existing journals: %v", err)
}
}
if config.DiskBlockCache() == nil {
dbc, err := newDiskBlockCacheStandard(config,
diskBlockCacheRootFromStorageRoot(config.StorageRoot()))
if err == nil {
config.SetDiskBlockCache(dbc)
}
}
config.MDServer().RefreshAuthToken(ctx)
config.BlockServer().RefreshAuthToken(ctx)
config.KBFSOps().RefreshCachedFavorites(ctx)
config.KBFSOps().PushStatusChange()
}
// serviceLoggedOut should be called when the current user logs out.
func serviceLoggedOut(ctx context.Context, config Config) {
if jServer, err := GetJournalServer(config); err == nil {
jServer.shutdownExistingJournals(ctx)
}
config.ResetCaches()
config.MDServer().RefreshAuthToken(ctx)
config.BlockServer().RefreshAuthToken(ctx)
config.KBFSOps().RefreshCachedFavorites(ctx)
config.KBFSOps().PushStatusChange()
// Clear any cached MD for all private TLFs, as they shouldn't be
// readable by a logged out user. We assume that a logged-out
// call always comes before a logged-in call.
config.KBFSOps().ClearPrivateFolderMD(ctx)
}
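// maybeEnableDirOpBatching is an illustrative sketch only (not part of the original file). It
// responds to the review question about -sync-batch-size: rather than unconditionally overriding
// the batch size for admins, the override could be skipped when the operator already chose a
// value other than the pre-feature default. It assumes Config exposes a BGFlushDirOpBatchSize
// getter paired with the setter used in the patch; that getter name is an assumption.
func maybeEnableDirOpBatching(config Config, uid keybase1.UID) {
	if !adminFeatureList[uid] {
		// Non-admins keep a batch size of 1 until the feature is rolled out more widely.
		config.SetBGFlushDirOpBatchSize(1)
		return
	}
	// Only bump the batch size if it is still at the pre-feature default of 1, so an explicit
	// command-line override is left alone.
	if config.BGFlushDirOpBatchSize() <= 1 {
		config.SetBGFlushDirOpBatchSize(bgFlushDirOpBatchSizeDefault)
	}
}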
| 1 | 16,639 | Does this mean all admins get the default batch size (which is 20) no matter the value of `-sync-batch-size`? It seems the command line flag doesn't do anything then does it? | keybase-kbfs | go |
@@ -35,8 +35,11 @@ class CurlDownloader implements DownloaderInterface
*
* @throws DownloaderException
*/
- public function download(string $url, array $headers = []): string
+ public function download(string $url, array $headers = [], array $acceptedContentTypes = null): string
{
+ if (null !== $acceptedContentTypes) {
+ throw new DownloaderException('CurlDownloader not supported param `acceptedHeaderTypes`');
+ }
$ch = curl_init();
curl_setopt($ch, CURLOPT_HEADER, 0);
curl_setopt($ch, CURLOPT_HTTPHEADER, $headers); | 1 | <?php
/**
* Copyright © Ergonode Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\Core\Infrastructure\Service;
use Symfony\Component\HttpFoundation\Response;
use Ergonode\Core\Infrastructure\Exception\DownloaderException;
use Psr\Log\LoggerInterface;
use Ergonode\Core\Infrastructure\Exception\FileNotFoundDownloaderException;
use Ergonode\Core\Infrastructure\Exception\AccessDeniedDownloaderException;
use Ergonode\Core\Infrastructure\Exception\BadRequestDownloaderException;
/**
* @deprecated
*/
class CurlDownloader implements DownloaderInterface
{
private const AGENT = 'Mozilla/5.0 '
.'(Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36';
private LoggerInterface $logger;
public function __construct(LoggerInterface $logger)
{
$this->logger = $logger;
}
/**
* @param Header[] $headers
*
* @throws DownloaderException
*/
public function download(string $url, array $headers = []): string
{
$ch = curl_init();
curl_setopt($ch, CURLOPT_HEADER, 0);
curl_setopt($ch, CURLOPT_HTTPHEADER, $headers);
curl_setopt($ch, CURLOPT_VERBOSE, 0);
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_URL, $url);
curl_setopt($ch, CURLOPT_USERAGENT, self::AGENT);
$content = curl_exec($ch);
$code = curl_getinfo($ch, CURLINFO_HTTP_CODE);
if (Response::HTTP_OK === $code && $content) {
return $content;
}
$this->logger->info(sprintf('Can\'t download file %s, code %s', $url, $code));
switch ($code) {
case Response::HTTP_NOT_FOUND:
throw new FileNotFoundDownloaderException($url);
case Response::HTTP_FORBIDDEN:
case Response::HTTP_UNAUTHORIZED:
throw new AccessDeniedDownloaderException($url);
case Response::HTTP_BAD_REQUEST:
throw new BadRequestDownloaderException($url);
default:
throw new DownloaderException(sprintf('Can\'t download file from %s', $url));
}
}
}
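// --- Illustrative sketch only; not part of CurlDownloader ---
// The review below asks for an empty-array default instead of null on the new parameter. Under
// that suggestion the patched signature and guard could read roughly as follows (a sketch, not
// the actual implementation):
//
//     public function download(string $url, array $headers = [], array $acceptedContentTypes = []): string
//     {
//         if ([] !== $acceptedContentTypes) {
//             throw new DownloaderException('CurlDownloader does not support the `acceptedContentTypes` param');
//         }
//         // ... unchanged curl logic ...
//     }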
| 1 | 9,718 | Set default value as empty array instead of null | ergonode-backend | php |
@@ -121,6 +121,10 @@ mainLoop:
logrus.WithField("route", routeUpd).Debug("Ignoring route with no link index.")
continue
}
+ if routeUpd.Dst == nil {
+ logrus.WithField("route", routeUpd).Debug("Ignoring route with no destination")
+ continue
+ }
idx := routeUpd.LinkIndex
oldUpds := updatesByIfaceIdx[idx] | 1 | // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ifacemonitor
import (
"context"
"net"
"syscall"
"time"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
"github.com/projectcalico/felix/timeshim"
)
const FlapDampingDelay = 100 * time.Millisecond
type updateFilter struct {
Time timeshim.Interface
}
type UpdateFilterOp func(filter *updateFilter)
func WithTimeShim(t timeshim.Interface) UpdateFilterOp {
return func(filter *updateFilter) {
filter.Time = t
}
}
// FilterUpdates filters out updates that occur when IPs are quickly removed and re-added.
// Some DHCP clients flap the IP during an IP renewal, for example.
//
// Algorithm:
// * Maintain a queue of link and address updates per interface.
// * When we see a potential flap (i.e. an IP deletion), defer processing the queue for a while.
// * If the flap resolves itself (i.e. the IP is added back), suppress the IP deletion.
func FilterUpdates(ctx context.Context,
routeOutC chan<- netlink.RouteUpdate, routeInC <-chan netlink.RouteUpdate,
linkOutC chan<- netlink.LinkUpdate, linkInC <-chan netlink.LinkUpdate,
options ...UpdateFilterOp,
) {
// Propagate failures to the downstream channels.
defer close(routeOutC)
defer close(linkOutC)
u := &updateFilter{
Time: timeshim.RealTime(),
}
for _, op := range options {
op(u)
}
logrus.Debug("FilterUpdates: starting")
var timerC <-chan time.Time
type timestampedUpd struct {
ReadyAt time.Time
Update interface{} // RouteUpdate or LinkUpdate
}
updatesByIfaceIdx := map[int][]timestampedUpd{}
mainLoop:
for {
select {
case <-ctx.Done():
logrus.Info("FilterUpdates: Context expired, stopping")
return
case linkUpd, ok := <-linkInC:
if !ok {
logrus.Error("FilterUpdates: link input channel closed.")
return
}
idx := int(linkUpd.Index)
linkIsUp := linkUpd.Header.Type == syscall.RTM_NEWLINK && linkIsOperUp(linkUpd.Link)
var delay time.Duration
if linkIsUp {
if len(updatesByIfaceIdx[idx]) == 0 {
// Empty queue (so no flap in progress) and the link is up, no need to delay the message.
linkOutC <- linkUpd
continue mainLoop
}
// Link is up but potential flap in progress, queue the update behind the other messages.
delay = 0
} else {
// We delay link down updates because a flap can involve both a link down and an IP removal.
// Since we receive those two messages over separate channels, the two messages can race.
delay = FlapDampingDelay
}
updatesByIfaceIdx[idx] = append(updatesByIfaceIdx[idx],
timestampedUpd{
ReadyAt: u.Time.Now().Add(delay),
Update: linkUpd,
})
case routeUpd, ok := <-routeInC:
if !ok {
logrus.Error("FilterUpdates: route input channel closed.")
return
}
logrus.WithField("route", routeUpd).Debug("Route update")
if !routeIsLocalUnicast(routeUpd.Route) {
logrus.WithField("route", routeUpd).Debug("Ignoring non-local route.")
continue
}
if routeUpd.LinkIndex == 0 {
logrus.WithField("route", routeUpd).Debug("Ignoring route with no link index.")
continue
}
idx := routeUpd.LinkIndex
oldUpds := updatesByIfaceIdx[idx]
var readyToSendTime time.Time
if routeUpd.Type == unix.RTM_NEWROUTE {
logrus.WithField("addr", routeUpd.Dst).Debug("FilterUpdates: got address ADD")
if len(oldUpds) == 0 {
// This is an add for a new IP and there's nothing else in the queue for this interface.
// Short circuit. We care about flaps where IPs are temporarily removed so no need to
// delay an add.
logrus.Debug("FilterUpdates: add with empty queue, short circuit.")
routeOutC <- routeUpd
continue
}
// Else, there's something else in the queue, need to process the queue...
logrus.Debug("FilterUpdates: add with non-empty queue.")
// We don't actually need to delay the add itself so we don't set any delay here. It will
// still be queued up behind other updates.
readyToSendTime = u.Time.Now()
} else {
// Got a delete, it might be a flap so queue the update.
logrus.WithField("addr", routeUpd.Dst).Debug("FilterUpdates: got address DEL")
readyToSendTime = u.Time.Now().Add(FlapDampingDelay)
}
// Coalesce updates for the same IP by squashing any previous updates for the same CIDR before
// we append this update to the queue. We need to scan the whole queue because there may be
// updates for different IPs in flight.
upds := oldUpds[:0]
for _, upd := range oldUpds {
logrus.WithField("previous", upd).Debug("FilterUpdates: examining previous update.")
if oldAddrUpd, ok := upd.Update.(netlink.RouteUpdate); ok {
if ipNetsEqual(oldAddrUpd.Dst, routeUpd.Dst) {
// New update for the same IP, suppress the old update
logrus.WithField("address", oldAddrUpd.Dst.String()).Debug(
"Received update for same IP within a short time, squashed the old update.")
continue
}
}
upds = append(upds, upd)
}
upds = append(upds, timestampedUpd{ReadyAt: readyToSendTime, Update: routeUpd})
updatesByIfaceIdx[idx] = upds
case <-timerC:
logrus.Debug("FilterUpdates: timer popped.")
timerC = nil
}
if timerC != nil {
			// Optimisation: we must have just queued an update but there's already a timer set and we know
// that timer must pop before the one for the new update. Skip recalculating the timer.
logrus.Debug("FilterUpdates: timer already set.")
continue mainLoop
}
var nextUpdTime time.Time
for idx, upds := range updatesByIfaceIdx {
logrus.WithField("ifaceIdx", idx).Debug("FilterUpdates: examining updates for interface.")
for len(upds) > 0 {
firstUpd := upds[0]
if u.Time.Since(firstUpd.ReadyAt) >= 0 {
// Either update is old enough to prevent flapping or it's an address being added.
// Ready to send...
logrus.WithField("update", firstUpd).Debug("FilterUpdates: update ready to send.")
switch u := firstUpd.Update.(type) {
case netlink.RouteUpdate:
routeOutC <- u
case netlink.LinkUpdate:
linkOutC <- u
}
upds = upds[1:]
} else {
// Update is too new, figure out when it'll be safe to send it.
logrus.WithField("update", firstUpd).Debug("FilterUpdates: update not ready.")
if nextUpdTime.IsZero() || firstUpd.ReadyAt.Before(nextUpdTime) {
nextUpdTime = firstUpd.ReadyAt
}
break
}
}
if len(upds) == 0 {
logrus.WithField("ifaceIdx", idx).Debug("FilterUpdates: no more updates for interface.")
delete(updatesByIfaceIdx, idx)
} else {
logrus.WithField("ifaceIdx", idx).WithField("num", len(upds)).Debug(
"FilterUpdates: still updates for interface.")
updatesByIfaceIdx[idx] = upds
}
}
if nextUpdTime.IsZero() {
// Queue is empty so no need to schedule a timer.
continue mainLoop
}
// Schedule timer to process the rest of the queue.
delay := u.Time.Until(nextUpdTime)
if delay <= 0 {
delay = 1
}
logrus.WithField("delay", delay).Debug("FilterUpdates: calculated delay.")
timerC = u.Time.After(delay)
}
}
func ipNetsEqual(a *net.IPNet, b *net.IPNet) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
aSize, aBits := a.Mask.Size()
bSize, bBits := b.Mask.Size()
return a.IP.Equal(b.IP) && aSize == bSize && aBits == bBits
}
func routeIsLocalUnicast(route netlink.Route) bool {
return route.Type == unix.RTN_LOCAL
}
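// --- Illustrative sketch only; not part of the original file ---
// The review below suggests unit-testing the nil-Dst case that the patch adds. A test along these
// lines would live in a _test.go file (with "testing" imported); the test name and timings here
// are assumptions, not code from this repository.
func TestFilterUpdatesIgnoresRouteWithNilDst(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	routeIn := make(chan netlink.RouteUpdate, 1)
	routeOut := make(chan netlink.RouteUpdate, 1)
	linkIn := make(chan netlink.LinkUpdate, 1)
	linkOut := make(chan netlink.LinkUpdate, 1)
	go FilterUpdates(ctx, routeOut, routeIn, linkOut, linkIn)

	// A local route with no destination should be dropped by the filter rather than forwarded.
	routeIn <- netlink.RouteUpdate{
		Type:  unix.RTM_NEWROUTE,
		Route: netlink.Route{LinkIndex: 2, Type: unix.RTN_LOCAL, Dst: nil},
	}

	select {
	case upd := <-routeOut:
		t.Fatalf("expected the nil-Dst route to be filtered out, got %v", upd)
	case <-time.After(500 * time.Millisecond):
		// Nothing was forwarded within the damping window; the update was ignored as intended.
	}
}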
| 1 | 19,028 | Would be good to UT this case | projectcalico-felix | go |
@@ -40,7 +40,7 @@ namespace Ethereum.Blockchain.Test
{
string expectedTypeName = ExpectedTypeName(directory);
Type type = types.SingleOrDefault(t => string.Equals(t.Name, expectedTypeName, StringComparison.InvariantCultureIgnoreCase));
- if(type == null && directory != "stEWASMTests" && directory != "runtimes")
+ if(type == null && directory != "stEWASMTests" && directory != "Specs" && directory != "runtimes")
{
if (new DirectoryInfo(directory).GetFiles().Any(f => f.Name.Contains(".resources.")))
{ | 1 | /*
* Copyright (c) 2018 Demerzel Solutions Limited
* This file is part of the Nethermind library.
*
* The Nethermind library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The Nethermind library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Reflection;
using NUnit.Framework;
namespace Ethereum.Blockchain.Test
{
[TestFixture][Parallelizable(ParallelScope.All)]
public class MetaTests
{
[Test]
public void All_categories_are_tested()
{
string[] directories = Directory.GetDirectories(AppDomain.CurrentDomain.BaseDirectory)
.Select(Path.GetFileName)
.ToArray();
Type[] types = GetType().Assembly.GetTypes();
List<string> missingCategories = new List<string>();
foreach (string directory in directories)
{
string expectedTypeName = ExpectedTypeName(directory);
Type type = types.SingleOrDefault(t => string.Equals(t.Name, expectedTypeName, StringComparison.InvariantCultureIgnoreCase));
if(type == null && directory != "stEWASMTests" && directory != "runtimes")
{
if (new DirectoryInfo(directory).GetFiles().Any(f => f.Name.Contains(".resources.")))
{
continue;
}
missingCategories.Add(directory);
}
}
foreach (string missing in missingCategories)
{
Console.WriteLine($"{missing} category is missing");
}
Assert.AreEqual(0, missingCategories.Count);
}
private static string ExpectedTypeName(string directory)
{
string expectedTypeName = directory.Remove(0, 2);
if (!expectedTypeName.EndsWith("Tests"))
{
if (!expectedTypeName.EndsWith("Test"))
{
expectedTypeName += "Tests";
}
else
{
expectedTypeName += "s";
}
}
return expectedTypeName;
}
}
} | 1 | 24,367 | this was updated after linking the latest ethereum/tests after a submodules recursive update | NethermindEth-nethermind | .cs |
@@ -13,6 +13,7 @@ import { Fragment } from './create-element';
export function Component(props, context) {
this.props = props;
this.context = context;
+ this.__data = {};
}
/** | 1 | import { assign } from './util';
import { diff, commitRoot } from './diff/index';
import options from './options';
import { Fragment } from './create-element';
/**
* Base Component class. Provides `setState()` and `forceUpdate()`, which
* trigger rendering
* @param {object} props The initial component props
* @param {object} context The initial context from parent components'
* getChildContext
*/
export function Component(props, context) {
this.props = props;
this.context = context;
}
/**
* Update component state and schedule a re-render.
* @param {object | ((s: object, p: object) => object)} update A hash of state
* properties to update with new values or a function that given the current
* state and props returns a new partial state
* @param {() => void} [callback] A function to be called once component state is
* updated
*/
Component.prototype.setState = function(update, callback) {
// only clone state when copying to nextState the first time.
let s;
if (this._nextState !== this.state) {
s = this._nextState;
} else {
s = this._nextState = assign({}, this.state);
}
if (typeof update == 'function') {
update = update(s, this.props);
}
if (update) {
assign(s, update);
}
// Skip update if updater function returned null
if (update == null) return;
if (this._vnode) {
this._force = false;
if (callback) this._renderCallbacks.push(callback);
enqueueRender(this);
}
};
/**
* Immediately perform a synchronous re-render of the component
* @param {() => void} [callback] A function to be called after component is
* re-rendered
*/
Component.prototype.forceUpdate = function(callback) {
if (this._vnode) {
// Set render mode so that we can differentiate where the render request
// is coming from. We need this because forceUpdate should never call
// shouldComponentUpdate
this._force = true;
if (callback) this._renderCallbacks.push(callback);
enqueueRender(this);
}
};
/**
* Accepts `props` and `state`, and returns a new Virtual DOM tree to build.
* Virtual DOM is generally constructed via [JSX](http://jasonformat.com/wtf-is-jsx).
* @param {object} props Props (eg: JSX attributes) received from parent
* element/component
* @param {object} state The component's current state
* @param {object} context Context object, as returned by the nearest
* ancestor's `getChildContext()`
* @returns {import('./index').ComponentChildren | void}
*/
Component.prototype.render = Fragment;
/**
* @param {import('./internal').VNode} vnode
* @param {number | null} [childIndex]
*/
export function getDomSibling(vnode, childIndex) {
if (childIndex == null) {
// Use childIndex==null as a signal to resume the search from the vnode's sibling
return vnode._parent
? getDomSibling(vnode._parent, vnode._parent._children.indexOf(vnode) + 1)
: null;
}
let sibling;
for (; childIndex < vnode._children.length; childIndex++) {
sibling = vnode._children[childIndex];
if (sibling != null && sibling._dom != null) {
// Since updateParentDomPointers keeps _dom pointer correct,
// we can rely on _dom to tell us if this subtree contains a
// rendered DOM node, and what the first rendered DOM node is
return sibling._dom;
}
}
// If we get here, we have not found a DOM node in this vnode's children.
// We must resume from this vnode's sibling (in it's parent _children array)
// Only climb up and search the parent if we aren't searching through a DOM
// VNode (meaning we reached the DOM parent of the original vnode that began
// the search)
return typeof vnode.type === 'function' ? getDomSibling(vnode) : null;
}
/**
* Trigger in-place re-rendering of a component.
* @param {import('./internal').Component} component The component to rerender
*/
function renderComponent(component) {
let vnode = component._vnode,
oldDom = vnode._dom,
parentDom = component._parentDom;
if (parentDom) {
let commitQueue = [];
let newDom = diff(
parentDom,
vnode,
assign({}, vnode),
component._context,
parentDom.ownerSVGElement !== undefined,
null,
commitQueue,
oldDom == null ? getDomSibling(vnode) : oldDom
);
commitRoot(commitQueue, vnode);
if (newDom != oldDom) {
updateParentDomPointers(vnode);
}
}
}
/**
* @param {import('./internal').VNode} vnode
*/
function updateParentDomPointers(vnode) {
if ((vnode = vnode._parent) != null && vnode._component != null) {
vnode._dom = vnode._component.base = null;
for (let i = 0; i < vnode._children.length; i++) {
let child = vnode._children[i];
if (child != null && child._dom != null) {
vnode._dom = vnode._component.base = child._dom;
break;
}
}
return updateParentDomPointers(vnode);
}
}
/**
* The render queue
* @type {Array<import('./internal').Component>}
*/
let q = [];
/**
* Asynchronously schedule a callback
* @type {(cb: () => void) => void}
*/
/* istanbul ignore next */
// Note the following line isn't tree-shaken by rollup cuz of rollup/rollup#2566
const defer =
typeof Promise == 'function'
? Promise.prototype.then.bind(Promise.resolve())
: setTimeout;
/*
* The value of `Component.debounce` must asynchronously invoke the passed in callback. It is
* important that contributors to Preact can consistently reason about what calls to `setState`, etc.
* do, and when their effects will be applied. See the links below for some further reading on designing
* asynchronous APIs.
* * [Designing APIs for Asynchrony](https://blog.izs.me/2013/08/designing-apis-for-asynchrony)
* * [Callbacks synchronous and asynchronous](https://blog.ometer.com/2011/07/24/callbacks-synchronous-and-asynchronous/)
*/
let prevDebounce;
/**
* Enqueue a rerender of a component
* @param {import('./internal').Component} c The component to rerender
*/
export function enqueueRender(c) {
if (
(!c._dirty && (c._dirty = true) && q.push(c) === 1) ||
prevDebounce !== options.debounceRendering
) {
prevDebounce = options.debounceRendering;
(prevDebounce || defer)(process);
}
}
/** Flush the render queue by rerendering all queued components */
function process() {
let p;
q.sort((a, b) => b._vnode._depth - a._vnode._depth);
while ((p = q.pop())) {
// forceUpdate's callback argument is reused here to indicate a non-forced update.
if (p._dirty) renderComponent(p);
}
}
| 1 | 14,624 | Todo: find a way to only do this in one spot, ideally with defaults for the stuff we use | preactjs-preact | js |
@@ -21,14 +21,19 @@ package org.apache.iceberg.parquet;
import java.io.File;
import java.io.IOException;
+import java.util.Map;
+
import org.apache.avro.generic.GenericData;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.Schema;
import org.apache.iceberg.TestMetrics;
import org.apache.iceberg.io.InputFile;
+import org.apache.parquet.hadoop.ParquetFileReader;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
+import static org.apache.iceberg.Files.localInput;
+
/**
* Test Metrics for Parquet.
*/ | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.parquet;
import java.io.File;
import java.io.IOException;
import org.apache.avro.generic.GenericData;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.Schema;
import org.apache.iceberg.TestMetrics;
import org.apache.iceberg.io.InputFile;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
/**
* Test Metrics for Parquet.
*/
public class TestParquetMetrics extends TestMetrics {
@Rule
public TemporaryFolder temp = new TemporaryFolder();
@Override
public Metrics getMetrics(InputFile file) {
return ParquetUtil.fileMetrics(file);
}
@Override
public File writeRecords(Schema schema, GenericData.Record... records) throws IOException {
return ParquetWritingTestUtils.writeRecords(temp, schema, records);
}
}
| 1 | 15,123 | Imports should not have blank lines. Sorry there isn't a checkstyle rule running for this yet, we still need to update the build for this one. | apache-iceberg | java |
@@ -3096,6 +3096,8 @@ class Booster(object):
"""Evaluate training or validation data."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
+ if callable(feval):
+ feval = [feval]
self.__get_eval_info()
ret = []
if self.__num_inner_eval > 0: | 1 | # coding: utf-8
"""Wrapper for C API of LightGBM."""
from __future__ import absolute_import, print_function
import copy
import ctypes
import os
import warnings
from tempfile import NamedTemporaryFile
from collections import OrderedDict
import numpy as np
import scipy.sparse
from .compat import (PANDAS_INSTALLED, DataFrame, Series, is_dtype_sparse,
DataTable,
decode_string, string_type,
integer_types, numeric_types,
json, json_default_with_numpy,
range_, zip_)
from .libpath import find_lib_path
def _log_callback(msg):
"""Redirect logs from native library into Python console."""
print("{0:s}".format(decode_string(msg)), end='')
def _load_lib():
"""Load LightGBM library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.LGBM_GetLastError.restype = ctypes.c_char_p
callback = ctypes.CFUNCTYPE(None, ctypes.c_char_p)
lib.callback = callback(_log_callback)
if lib.LGBM_RegisterLogCallback(lib.callback) != 0:
raise LightGBMError(decode_string(lib.LGBM_GetLastError()))
return lib
_LIB = _load_lib()
def _safe_call(ret):
"""Check the return value from C API call.
Parameters
----------
ret : int
The return value from C API calls.
"""
if ret != 0:
raise LightGBMError(decode_string(_LIB.LGBM_GetLastError()))
def is_numeric(obj):
"""Check whether object is a number or not, include numpy number, etc."""
try:
float(obj)
return True
except (TypeError, ValueError):
# TypeError: obj is not a string or a number
# ValueError: invalid literal
return False
def is_numpy_1d_array(data):
"""Check whether data is a numpy 1-D array."""
return isinstance(data, np.ndarray) and len(data.shape) == 1
def is_1d_list(data):
"""Check whether data is a 1-D list."""
return isinstance(data, list) and (not data or is_numeric(data[0]))
def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""Convert data to numpy 1-D array."""
if is_numpy_1d_array(data):
if data.dtype == dtype:
return data
else:
return data.astype(dtype=dtype, copy=False)
elif is_1d_list(data):
return np.array(data, dtype=dtype, copy=False)
elif isinstance(data, Series):
if _get_bad_pandas_dtypes([data.dtypes]):
raise ValueError('Series.dtypes must be int, float or bool')
return np.array(data, dtype=dtype, copy=False) # SparseArray should be supported as well
else:
raise TypeError("Wrong type({0}) for {1}.\n"
"It should be list, numpy 1-D array or pandas Series".format(type(data).__name__, name))
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.fromiter(cptr, dtype=np.float32, count=length)
else:
raise RuntimeError('Expected float pointer')
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.fromiter(cptr, dtype=np.float64, count=length)
else:
raise RuntimeError('Expected double pointer')
def cint32_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
return np.fromiter(cptr, dtype=np.int32, count=length)
else:
raise RuntimeError('Expected int32 pointer')
def cint64_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int64)):
return np.fromiter(cptr, dtype=np.int64, count=length)
else:
raise RuntimeError('Expected int64 pointer')
def c_str(string):
"""Convert a Python string to C string."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a Python array to C array."""
return (ctype * len(values))(*values)
def param_dict_to_str(data):
"""Convert Python dictionary to string, which is passed to C API."""
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
def to_string(x):
if isinstance(x, list):
return "[{}]".format(','.join(map(str, x)))
else:
return str(x)
pairs.append(str(key) + '=' + ','.join(map(to_string, val)))
elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):
pairs.append(str(key) + '=' + str(val))
elif val is not None:
raise TypeError('Unknown type of parameter:%s, got:%s'
% (key, type(val).__name__))
return ' '.join(pairs)
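# Illustration (not executed): param_dict_to_str({'num_leaves': 31, 'metric': ['l2', 'l1']})
# yields a string like "num_leaves=31 metric=l2,l1", the space-separated key=value
# format that the C API parses.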
class _TempFile(object):
def __enter__(self):
with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
self.name = f.name
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if os.path.isfile(self.name):
os.remove(self.name)
def readlines(self):
with open(self.name, "r+") as f:
ret = f.readlines()
return ret
def writelines(self, lines):
with open(self.name, "w+") as f:
f.writelines(lines)
class LightGBMError(Exception):
"""Error thrown by LightGBM."""
pass
class _ConfigAliases(object):
aliases = {"bin_construct_sample_cnt": {"bin_construct_sample_cnt",
"subsample_for_bin"},
"boosting": {"boosting",
"boosting_type",
"boost"},
"categorical_feature": {"categorical_feature",
"cat_feature",
"categorical_column",
"cat_column"},
"data_random_seed": {"data_random_seed",
"data_seed"},
"early_stopping_round": {"early_stopping_round",
"early_stopping_rounds",
"early_stopping",
"n_iter_no_change"},
"enable_bundle": {"enable_bundle",
"is_enable_bundle",
"bundle"},
"eval_at": {"eval_at",
"ndcg_eval_at",
"ndcg_at",
"map_eval_at",
"map_at"},
"group_column": {"group_column",
"group",
"group_id",
"query_column",
"query",
"query_id"},
"header": {"header",
"has_header"},
"ignore_column": {"ignore_column",
"ignore_feature",
"blacklist"},
"is_enable_sparse": {"is_enable_sparse",
"is_sparse",
"enable_sparse",
"sparse"},
"label_column": {"label_column",
"label"},
"machines": {"machines",
"workers",
"nodes"},
"metric": {"metric",
"metrics",
"metric_types"},
"num_class": {"num_class",
"num_classes"},
"num_iterations": {"num_iterations",
"num_iteration",
"n_iter",
"num_tree",
"num_trees",
"num_round",
"num_rounds",
"num_boost_round",
"n_estimators"},
"objective": {"objective",
"objective_type",
"app",
"application"},
"pre_partition": {"pre_partition",
"is_pre_partition"},
"two_round": {"two_round",
"two_round_loading",
"use_two_round_loading"},
"verbosity": {"verbosity",
"verbose"},
"weight_column": {"weight_column",
"weight"}}
@classmethod
def get(cls, *args):
ret = set()
for i in args:
ret |= cls.aliases.get(i, {i})
return ret
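# Illustration: _ConfigAliases.get("boosting", "metric") returns the union of the known
# aliases, i.e. {"boosting", "boosting_type", "boost", "metric", "metrics", "metric_types"}.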
MAX_INT32 = (1 << 31) - 1
"""Macro definition of data type in C API of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
"""Matrix is row major in Python"""
C_API_IS_ROW_MAJOR = 1
"""Macro definition of prediction type in C API of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
C_API_PREDICT_CONTRIB = 3
"""Macro definition of sparse matrix type"""
C_API_MATRIX_TYPE_CSR = 0
C_API_MATRIX_TYPE_CSC = 1
"""Macro definition of feature importance type"""
C_API_FEATURE_IMPORTANCE_SPLIT = 0
C_API_FEATURE_IMPORTANCE_GAIN = 1
"""Data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
"weight": C_API_DTYPE_FLOAT32,
"init_score": C_API_DTYPE_FLOAT64,
"group": C_API_DTYPE_INT32}
"""String name to int feature importance type mapper"""
FEATURE_IMPORTANCE_TYPE_MAPPER = {"split": C_API_FEATURE_IMPORTANCE_SPLIT,
"gain": C_API_FEATURE_IMPORTANCE_GAIN}
def convert_from_sliced_object(data):
"""Fix the memory of multi-dimensional sliced object."""
if isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
if not data.flags.c_contiguous:
warnings.warn("Usage of np.ndarray subset (sliced data) is not recommended "
"due to it will double the peak memory cost in LightGBM.")
return np.copy(data)
return data
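# Note: a column view such as X[:, 0] taken from a 2-D array is a non-contiguous slice
# and triggers the warning and copy above; passing an owned, contiguous 1-D array avoids
# the extra peak memory.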
def c_float_array(data):
"""Get pointer of float numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
else:
raise TypeError("Expected np.float32 or np.float64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed
def c_int_array(data):
"""Get pointer of int numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError("Expected np.int32 or np.int64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed
def _get_bad_pandas_dtypes(dtypes):
pandas_dtype_mapper = {'int8': 'int', 'int16': 'int', 'int32': 'int',
'int64': 'int', 'uint8': 'int', 'uint16': 'int',
'uint32': 'int', 'uint64': 'int', 'bool': 'int',
'float16': 'float', 'float32': 'float', 'float64': 'float'}
bad_indices = [i for i, dtype in enumerate(dtypes) if (dtype.name not in pandas_dtype_mapper
and (not is_dtype_sparse(dtype)
or dtype.subtype.name not in pandas_dtype_mapper))]
return bad_indices
def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
if isinstance(data, DataFrame):
if len(data.shape) != 2 or data.shape[0] < 1:
raise ValueError('Input data must be 2 dimensional and non empty.')
if feature_name == 'auto' or feature_name is None:
data = data.rename(columns=str)
cat_cols = list(data.select_dtypes(include=['category']).columns)
cat_cols_not_ordered = [col for col in cat_cols if not data[col].cat.ordered]
if pandas_categorical is None: # train dataset
pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
else:
if len(cat_cols) != len(pandas_categorical):
raise ValueError('train and valid dataset categorical_feature do not match.')
for col, category in zip_(cat_cols, pandas_categorical):
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is list
data = data.copy() # not alter origin DataFrame
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes).replace({-1: np.nan})
if categorical_feature is not None:
if feature_name is None:
feature_name = list(data.columns)
if categorical_feature == 'auto': # use cat cols from DataFrame
categorical_feature = cat_cols_not_ordered
else: # use cat cols specified by user
categorical_feature = list(categorical_feature)
if feature_name == 'auto':
feature_name = list(data.columns)
bad_indices = _get_bad_pandas_dtypes(data.dtypes)
if bad_indices:
raise ValueError("DataFrame.dtypes for data must be int, float or bool.\n"
"Did not expect the data types in the following fields: "
+ ', '.join(data.columns[bad_indices]))
data = data.values
if data.dtype != np.float32 and data.dtype != np.float64:
data = data.astype(np.float32)
else:
if feature_name == 'auto':
feature_name = None
if categorical_feature == 'auto':
categorical_feature = None
return data, feature_name, categorical_feature, pandas_categorical
def _label_from_pandas(label):
if isinstance(label, DataFrame):
if len(label.columns) > 1:
raise ValueError('DataFrame for label cannot have multiple columns')
if _get_bad_pandas_dtypes(label.dtypes):
raise ValueError('DataFrame.dtypes for label must be int, float or bool')
label = np.ravel(label.values.astype(np.float32, copy=False))
return label
def _dump_pandas_categorical(pandas_categorical, file_name=None):
pandas_str = ('\npandas_categorical:'
+ json.dumps(pandas_categorical, default=json_default_with_numpy)
+ '\n')
if file_name is not None:
with open(file_name, 'a') as f:
f.write(pandas_str)
return pandas_str
def _load_pandas_categorical(file_name=None, model_str=None):
pandas_key = 'pandas_categorical:'
offset = -len(pandas_key)
if file_name is not None:
max_offset = -os.path.getsize(file_name)
with open(file_name, 'rb') as f:
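            # Read an exponentially growing tail of the model file until at least two full
            # lines are available: the pandas_categorical entry written by
            # _dump_pandas_categorical sits on one of the last two lines of the file.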
while True:
if offset < max_offset:
offset = max_offset
f.seek(offset, os.SEEK_END)
lines = f.readlines()
if len(lines) >= 2:
break
offset *= 2
last_line = decode_string(lines[-1]).strip()
if not last_line.startswith(pandas_key):
last_line = decode_string(lines[-2]).strip()
elif model_str is not None:
idx = model_str.rfind('\n', 0, offset)
last_line = model_str[idx:].strip()
if last_line.startswith(pandas_key):
return json.loads(last_line[len(pandas_key):])
else:
return None
class _InnerPredictor(object):
"""_InnerPredictor of LightGBM.
Not exposed to user.
Used only for prediction, usually used for continued training.
.. note::
Can be converted from Booster, but cannot be converted to Booster.
"""
def __init__(self, model_file=None, booster_handle=None, pred_parameter=None):
"""Initialize the _InnerPredictor.
Parameters
----------
model_file : string or None, optional (default=None)
Path to the model file.
booster_handle : object or None, optional (default=None)
Handle of Booster.
pred_parameter: dict or None, optional (default=None)
            Other parameters for the prediction.
"""
self.handle = ctypes.c_void_p()
self.__is_manage_handle = True
if model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif booster_handle is not None:
self.__is_manage_handle = False
self.handle = booster_handle
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = self.current_iteration()
self.pandas_categorical = None
else:
raise TypeError('Need model_file or booster_handle to create a predictor')
pred_parameter = {} if pred_parameter is None else pred_parameter
self.pred_parameter = param_dict_to_str(pred_parameter)
def __del__(self):
try:
if self.__is_manage_handle:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __getstate__(self):
this = self.__dict__.copy()
this.pop('handle', None)
return this
def predict(self, data, start_iteration=0, num_iteration=-1,
raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False,
is_reshape=True):
"""Predict logic.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
When data type is string, it represents the path of txt file.
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
num_iteration : int, optional (default=-1)
Iteration used for prediction.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
data_has_header : bool, optional (default=False)
Whether data has header.
Used only for txt data.
is_reshape : bool, optional (default=True)
Whether to reshape to (nrow, ncol).
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
if isinstance(data, Dataset):
raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
predict_type = C_API_PREDICT_NORMAL
if raw_score:
predict_type = C_API_PREDICT_RAW_SCORE
if pred_leaf:
predict_type = C_API_PREDICT_LEAF_INDEX
if pred_contrib:
predict_type = C_API_PREDICT_CONTRIB
int_data_has_header = 1 if data_has_header else 0
if isinstance(data, string_type):
with _TempFile() as f:
_safe_call(_LIB.LGBM_BoosterPredictForFile(
self.handle,
c_str(data),
ctypes.c_int(int_data_has_header),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
c_str(f.name)))
lines = f.readlines()
nrow = len(lines)
preds = [float(token) for line in lines for token in line.split('\t')]
preds = np.array(preds, dtype=np.float64, copy=False)
elif isinstance(data, scipy.sparse.csr_matrix):
preds, nrow = self.__pred_for_csr(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, scipy.sparse.csc_matrix):
preds, nrow = self.__pred_for_csc(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, np.ndarray):
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, list):
try:
data = np.array(data)
except BaseException:
raise ValueError('Cannot convert data list to numpy array.')
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, DataTable):
preds, nrow = self.__pred_for_np2d(data.to_numpy(), start_iteration, num_iteration, predict_type)
else:
try:
warnings.warn('Converting data to scipy sparse matrix.')
csr = scipy.sparse.csr_matrix(data)
except BaseException:
raise TypeError('Cannot predict data for type {}'.format(type(data).__name__))
preds, nrow = self.__pred_for_csr(csr, start_iteration, num_iteration, predict_type)
if pred_leaf:
preds = preds.astype(np.int32)
is_sparse = scipy.sparse.issparse(preds) or isinstance(preds, list)
if is_reshape and not is_sparse and preds.size != nrow:
if preds.size % nrow == 0:
preds = preds.reshape(nrow, -1)
else:
                raise ValueError('Length of predict result (%d) cannot be divided evenly by nrow (%d)'
% (preds.size, nrow))
return preds
def __get_num_preds(self, start_iteration, num_iteration, nrow, predict_type):
"""Get size of prediction result."""
if nrow > MAX_INT32:
            raise LightGBMError('LightGBM cannot perform prediction for data '
                                'with number of rows greater than MAX_INT32 (%d).\n'
                                'You can split your data into chunks '
                                'and then concatenate predictions for them' % MAX_INT32)
n_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterCalcNumPredict(
self.handle,
ctypes.c_int(nrow),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.byref(n_preds)))
return n_preds.value
def __pred_for_np2d(self, mat, start_iteration, num_iteration, predict_type):
"""Predict for a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray or list must be 2 dimensional')
def inner_predict(mat, start_iteration, num_iteration, predict_type, preds=None):
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
n_preds = self.__get_num_preds(start_iteration, num_iteration, mat.shape[0], predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0]
nrow = mat.shape[0]
if nrow > MAX_INT32:
sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff([0] + list(sections) + [nrow])]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for chunk, (start_idx_pred, end_idx_pred) in zip_(np.array_split(mat, sections),
zip_(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(chunk, start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(mat, start_iteration, num_iteration, predict_type)
def __create_sparse_native(self, cs, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
indptr_type, data_type, is_csr=True):
# create numpy array from output arrays
data_indices_len = out_shape[0]
indptr_len = out_shape[1]
if indptr_type == C_API_DTYPE_INT32:
out_indptr = cint32_array_to_numpy(out_ptr_indptr, indptr_len)
elif indptr_type == C_API_DTYPE_INT64:
out_indptr = cint64_array_to_numpy(out_ptr_indptr, indptr_len)
else:
raise TypeError("Expected int32 or int64 type for indptr")
if data_type == C_API_DTYPE_FLOAT32:
out_data = cfloat32_array_to_numpy(out_ptr_data, data_indices_len)
elif data_type == C_API_DTYPE_FLOAT64:
out_data = cfloat64_array_to_numpy(out_ptr_data, data_indices_len)
else:
raise TypeError("Expected float32 or float64 type for data")
out_indices = cint32_array_to_numpy(out_ptr_indices, data_indices_len)
# break up indptr based on number of rows (note more than one matrix in multiclass case)
per_class_indptr_shape = cs.indptr.shape[0]
# for CSC there is extra column added
if not is_csr:
per_class_indptr_shape += 1
out_indptr_arrays = np.split(out_indptr, out_indptr.shape[0] / per_class_indptr_shape)
# reformat output into a csr or csc matrix or list of csr or csc matrices
cs_output_matrices = []
offset = 0
for cs_indptr in out_indptr_arrays:
matrix_indptr_len = cs_indptr[cs_indptr.shape[0] - 1]
cs_indices = out_indices[offset + cs_indptr[0]:offset + matrix_indptr_len]
cs_data = out_data[offset + cs_indptr[0]:offset + matrix_indptr_len]
offset += matrix_indptr_len
# same shape as input csr or csc matrix except extra column for expected value
cs_shape = [cs.shape[0], cs.shape[1] + 1]
# note: make sure we copy data as it will be deallocated next
if is_csr:
cs_output_matrices.append(scipy.sparse.csr_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
else:
cs_output_matrices.append(scipy.sparse.csc_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
# free the temporary native indptr, indices, and data
_safe_call(_LIB.LGBM_BoosterFreePredictSparse(out_ptr_indptr, out_ptr_indices, out_ptr_data,
ctypes.c_int(indptr_type), ctypes.c_int(data_type)))
if len(cs_output_matrices) == 1:
return cs_output_matrices[0]
return cs_output_matrices
def __pred_for_csr(self, csr, start_iteration, num_iteration, predict_type):
"""Predict for a CSR data."""
def inner_predict(csr, start_iteration, num_iteration, predict_type, preds=None):
nrow = len(csr.indptr) - 1
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSR(
self.handle,
ptr_indptr,
ctypes.c_int32(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def inner_predict_sparse(csr, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
csr_indices = csr.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSR
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.zeros(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int32(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csr, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=True)
nrow = len(csr.indptr) - 1
return matrices, nrow
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csr, start_iteration, num_iteration, predict_type)
nrow = len(csr.indptr) - 1
if nrow > MAX_INT32:
sections = [0] + list(np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)) + [nrow]
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff(sections)]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for (start_idx, end_idx), (start_idx_pred, end_idx_pred) in zip_(zip_(sections, sections[1:]),
zip_(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(csr[start_idx:end_idx], start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(csr, start_iteration, num_iteration, predict_type)
def __pred_for_csc(self, csc, start_iteration, num_iteration, predict_type):
"""Predict for a CSC data."""
def inner_predict_sparse(csc, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
csc_indices = csc.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSC
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.zeros(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int32(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csc, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=False)
nrow = csc.shape[0]
return matrices, nrow
nrow = csc.shape[0]
if nrow > MAX_INT32:
return self.__pred_for_csr(csc.tocsr(), start_iteration, num_iteration, predict_type)
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csc, start_iteration, num_iteration, predict_type)
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSC(
self.handle,
ptr_indptr,
ctypes.c_int32(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
class Dataset(object):
"""Dataset in LightGBM."""
def __init__(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, silent=False,
feature_name='auto', categorical_feature='auto', params=None,
free_raw_data=True):
"""Initialize Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
reference : Dataset or None, optional (default=None)
If this is Dataset for validation, training data should be used as reference.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query size for Dataset.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
The output cannot be monotonically constrained with respect to a categorical feature.
params : dict or None, optional (default=None)
Other parameters for Dataset.
free_raw_data : bool, optional (default=True)
If True, raw data is freed after constructing inner Dataset.
"""
self.handle = None
self.data = data
self.label = label
self.reference = reference
self.weight = weight
self.group = group
self.init_score = init_score
self.silent = silent
self.feature_name = feature_name
self.categorical_feature = categorical_feature
self.params = copy.deepcopy(params)
self.free_raw_data = free_raw_data
self.used_indices = None
self.need_slice = True
self._predictor = None
self.pandas_categorical = None
self.params_back_up = None
self.feature_penalty = None
self.monotone_constraints = None
self.version = 0
def __del__(self):
try:
self._free_handle()
except AttributeError:
pass
def get_params(self):
"""Get the used parameters in the Dataset.
Returns
-------
params : dict or None
The used parameters in this Dataset object.
"""
if self.params is not None:
# no min_data, nthreads and verbose in this function
dataset_params = _ConfigAliases.get("bin_construct_sample_cnt",
"categorical_feature",
"data_random_seed",
"enable_bundle",
"feature_pre_filter",
"forcedbins_filename",
"group_column",
"header",
"ignore_column",
"is_enable_sparse",
"label_column",
"max_bin",
"max_bin_by_feature",
"min_data_in_bin",
"pre_partition",
"two_round",
"use_missing",
"weight_column",
"zero_as_missing")
return {k: v for k, v in self.params.items() if k in dataset_params}
def _free_handle(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_DatasetFree(self.handle))
self.handle = None
self.need_slice = True
if self.used_indices is not None:
self.data = None
return self
def _set_init_score_by_predictor(self, predictor, data, used_indices=None):
data_has_header = False
if isinstance(data, string_type):
# check data has header or not
data_has_header = any(self.params.get(alias, False) for alias in _ConfigAliases.get("header"))
num_data = self.num_data()
if predictor is not None:
init_score = predictor.predict(data,
raw_score=True,
data_has_header=data_has_header,
is_reshape=False)
if used_indices is not None:
assert not self.need_slice
if isinstance(data, string_type):
sub_init_score = np.zeros(num_data * predictor.num_class, dtype=np.float32)
assert num_data == len(used_indices)
for i in range_(len(used_indices)):
for j in range_(predictor.num_class):
sub_init_score[i * predictor.num_class + j] = init_score[used_indices[i] * predictor.num_class + j]
init_score = sub_init_score
if predictor.num_class > 1:
# need to regroup init_score
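                # (reorder from row-major [i * num_class + j] to class-major [j * num_data + i])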
new_init_score = np.zeros(init_score.size, dtype=np.float32)
for i in range_(num_data):
for j in range_(predictor.num_class):
new_init_score[j * num_data + i] = init_score[i * predictor.num_class + j]
init_score = new_init_score
elif self.init_score is not None:
init_score = np.zeros(self.init_score.shape, dtype=np.float32)
else:
return self
self.set_init_score(init_score)
def _lazy_init(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, predictor=None,
silent=False, feature_name='auto',
categorical_feature='auto', params=None):
if data is None:
self.handle = None
return self
if reference is not None:
self.pandas_categorical = reference.pandas_categorical
categorical_feature = reference.categorical_feature
data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data,
feature_name,
categorical_feature,
self.pandas_categorical)
label = _label_from_pandas(label)
# process for args
params = {} if params is None else params
args_names = (getattr(self.__class__, '_lazy_init')
.__code__
.co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount])
for key, _ in params.items():
if key in args_names:
warnings.warn('{0} keyword has been found in `params` and will be ignored.\n'
'Please use {0} argument of the Dataset constructor to pass this parameter.'
.format(key))
# user can set verbose with params, it has higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
# get categorical features
if categorical_feature is not None:
categorical_indices = set()
feature_dict = {}
if feature_name is not None:
feature_dict = {name: i for i, name in enumerate(feature_name)}
for name in categorical_feature:
if isinstance(name, string_type) and name in feature_dict:
categorical_indices.add(feature_dict[name])
elif isinstance(name, integer_types):
categorical_indices.add(name)
else:
raise TypeError("Wrong type({}) or unknown name({}) in categorical_feature"
.format(type(name).__name__, name))
if categorical_indices:
for cat_alias in _ConfigAliases.get("categorical_feature"):
if cat_alias in params:
warnings.warn('{} in param dict is overridden.'.format(cat_alias))
params.pop(cat_alias, None)
params['categorical_column'] = sorted(categorical_indices)
params_str = param_dict_to_str(params)
self.params = params
# process for reference dataset
ref_dataset = None
if isinstance(reference, Dataset):
ref_dataset = reference.construct().handle
elif reference is not None:
raise TypeError('Reference dataset should be None or dataset instance')
# start construct data
if isinstance(data, string_type):
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromFile(
c_str(data),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
elif isinstance(data, scipy.sparse.csr_matrix):
self.__init_from_csr(data, params_str, ref_dataset)
elif isinstance(data, scipy.sparse.csc_matrix):
self.__init_from_csc(data, params_str, ref_dataset)
elif isinstance(data, np.ndarray):
self.__init_from_np2d(data, params_str, ref_dataset)
elif isinstance(data, list) and len(data) > 0 and all(isinstance(x, np.ndarray) for x in data):
self.__init_from_list_np2d(data, params_str, ref_dataset)
elif isinstance(data, DataTable):
self.__init_from_np2d(data.to_numpy(), params_str, ref_dataset)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset)
except BaseException:
raise TypeError('Cannot initialize Dataset from {}'.format(type(data).__name__))
if label is not None:
self.set_label(label)
if self.get_label() is None:
raise ValueError("Label should not be None")
if weight is not None:
self.set_weight(weight)
if group is not None:
self.set_group(group)
if isinstance(predictor, _InnerPredictor):
if self._predictor is None and init_score is not None:
warnings.warn("The init_score will be overridden by the prediction of init_model.")
self._set_init_score_by_predictor(predictor, data)
elif init_score is not None:
self.set_init_score(init_score)
elif predictor is not None:
raise TypeError('Wrong predictor type {}'.format(type(predictor).__name__))
# set feature names
return self.set_feature_name(feature_name)
def __init_from_np2d(self, mat, params_str, ref_dataset):
"""Initialize data from a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
self.handle = ctypes.c_void_p()
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetCreateFromMat(
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
"""Initialize data from a list of 2-D numpy matrices."""
ncol = mats[0].shape[1]
nrow = np.zeros((len(mats),), np.int32)
if mats[0].dtype == np.float64:
ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
else:
ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
type_ptr_data = None
for i, mat in enumerate(mats):
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.shape[1] != ncol:
raise ValueError('Input arrays must have same number of columns')
nrow[i] = mat.shape[0]
if mat.dtype == np.float32 or mat.dtype == np.float64:
mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)
chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
raise ValueError('Input chunks must have same type')
ptr_data[i] = chunk_ptr_data
type_ptr_data = chunk_type_ptr_data
holders.append(holder)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromMats(
ctypes.c_int(len(mats)),
ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.c_int(type_ptr_data),
nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int(ncol),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csr(self, csr, params_str, ref_dataset):
"""Initialize data from a CSR matrix."""
if len(csr.indices) != len(csr.data):
raise ValueError('Length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSR(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csc(self, csc, params_str, ref_dataset):
"""Initialize data from a CSC matrix."""
if len(csc.indices) != len(csc.data):
raise ValueError('Length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSC(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def construct(self):
"""Lazy init.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
if self.handle is None:
if self.reference is not None:
reference_params = self.reference.get_params()
if self.get_params() != reference_params:
warnings.warn('Overriding the parameters from Reference Dataset.')
self._update_params(reference_params)
if self.used_indices is None:
# create valid
self._lazy_init(self.data, label=self.label, reference=self.reference,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name, params=self.params)
else:
# construct subset
used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
assert used_indices.flags.c_contiguous
if self.reference.group is not None:
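                    # Rebuild group sizes for the subset: expand the reference group sizes
                    # to per-row group ids, select the subset rows, then count rows per
                    # remaining group.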
group_info = np.array(self.reference.group).astype(np.int32, copy=False)
_, self.group = np.unique(np.repeat(range_(len(group_info)), repeats=group_info)[self.used_indices],
return_counts=True)
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.params)
_safe_call(_LIB.LGBM_DatasetGetSubset(
self.reference.construct().handle,
used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int(used_indices.shape[0]),
c_str(params_str),
ctypes.byref(self.handle)))
if not self.free_raw_data:
self.get_data()
if self.group is not None:
self.set_group(self.group)
if self.get_label() is None:
raise ValueError("Label should not be None.")
if isinstance(self._predictor, _InnerPredictor) and self._predictor is not self.reference._predictor:
self.get_data()
self._set_init_score_by_predictor(self._predictor, self.data, used_indices)
else:
# create train
self._lazy_init(self.data, label=self.label,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=self.params)
if self.free_raw_data:
self.data = None
return self
def create_valid(self, data, label=None, weight=None, group=None,
init_score=None, silent=False, params=None):
"""Create validation data align with current Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query size for Dataset.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
params : dict or None, optional (default=None)
Other parameters for validation Dataset.
Returns
-------
valid : Dataset
Validation Dataset with reference to self.
"""
ret = Dataset(data, label=label, reference=self,
weight=weight, group=group, init_score=init_score,
silent=silent, params=params, free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret
def subset(self, used_indices, params=None):
"""Get subset of current Dataset.
Parameters
----------
used_indices : list of int
Indices used to create the subset.
params : dict or None, optional (default=None)
These parameters will be passed to Dataset constructor.
Returns
-------
subset : Dataset
Subset of the current Dataset.
"""
if params is None:
params = self.params
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = sorted(used_indices)
return ret
def save_binary(self, filename):
"""Save Dataset to a binary file.
.. note::
Please note that `init_score` is not saved in binary file.
If you need it, please set it again after loading Dataset.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetSaveBinary(
self.construct().handle,
c_str(filename)))
return self
def _update_params(self, params):
if not params:
return self
params = copy.deepcopy(params)
def update():
if not self.params:
self.params = params
else:
self.params_back_up = copy.deepcopy(self.params)
self.params.update(params)
if self.handle is None:
update()
elif params is not None:
ret = _LIB.LGBM_DatasetUpdateParamChecking(
c_str(param_dict_to_str(self.params)),
c_str(param_dict_to_str(params)))
if ret != 0:
# could be updated if data is not freed
if self.data is not None:
update()
self._free_handle()
else:
raise LightGBMError(decode_string(_LIB.LGBM_GetLastError()))
return self
def _reverse_update_params(self):
if self.handle is None:
self.params = copy.deepcopy(self.params_back_up)
self.params_back_up = None
return self
def set_field(self, field_name, data):
"""Set property into the Dataset.
Parameters
----------
field_name : string
The field name of the information.
data : list, numpy 1-D array, pandas Series or None
The array of data to be set.
Returns
-------
self : Dataset
Dataset with set property.
"""
if self.handle is None:
raise Exception("Cannot set %s before construct dataset" % field_name)
if data is None:
# set to None
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
None,
ctypes.c_int(0),
ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
return self
dtype = np.float32
if field_name == 'group':
dtype = np.int32
elif field_name == 'init_score':
dtype = np.float64
data = list_to_1d_numpy(data, dtype, name=field_name)
if data.dtype == np.float32 or data.dtype == np.float64:
ptr_data, type_data, _ = c_float_array(data)
elif data.dtype == np.int32:
ptr_data, type_data, _ = c_int_array(data)
else:
raise TypeError("Expected np.float32/64 or np.int32, met type({})".format(data.dtype))
if type_data != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Input type error for set_field")
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
ptr_data,
ctypes.c_int(len(data)),
ctypes.c_int(type_data)))
self.version += 1
return self
def get_field(self, field_name):
"""Get property from the Dataset.
Parameters
----------
field_name : string
The field name of the information.
Returns
-------
info : numpy array
A numpy array with information from the Dataset.
"""
if self.handle is None:
raise Exception("Cannot get %s before construct Dataset" % field_name)
tmp_out_len = ctypes.c_int()
out_type = ctypes.c_int()
ret = ctypes.POINTER(ctypes.c_void_p)()
_safe_call(_LIB.LGBM_DatasetGetField(
self.handle,
c_str(field_name),
ctypes.byref(tmp_out_len),
ctypes.byref(ret),
ctypes.byref(out_type)))
if out_type.value != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Return type error for get_field")
if tmp_out_len.value == 0:
return None
if out_type.value == C_API_DTYPE_INT32:
return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT32:
return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT64:
return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
else:
raise TypeError("Unknown type")
def set_categorical_feature(self, categorical_feature):
"""Set categorical features.
Parameters
----------
categorical_feature : list of int or strings
Names or indices of categorical features.
Returns
-------
self : Dataset
Dataset with set categorical features.
"""
if self.categorical_feature == categorical_feature:
return self
if self.data is not None:
if self.categorical_feature is None:
self.categorical_feature = categorical_feature
return self._free_handle()
elif categorical_feature == 'auto':
warnings.warn('Using categorical_feature in Dataset.')
return self
else:
warnings.warn('categorical_feature in Dataset is overridden.\n'
'New categorical_feature is {}'.format(sorted(list(categorical_feature))))
self.categorical_feature = categorical_feature
return self._free_handle()
else:
raise LightGBMError("Cannot set categorical feature after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def _set_predictor(self, predictor):
"""Set predictor for continued training.
        It is not recommended for users to call this function.
Please use init_model argument in engine.train() or engine.cv() instead.
"""
if predictor is self._predictor and (predictor is None or predictor.current_iteration() == self._predictor.current_iteration()):
return self
if self.handle is None:
self._predictor = predictor
elif self.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.data)
elif self.used_indices is not None and self.reference is not None and self.reference.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.reference.data, self.used_indices)
else:
raise LightGBMError("Cannot set predictor after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self
def set_reference(self, reference):
"""Set reference Dataset.
Parameters
----------
reference : Dataset
Reference that is used as a template to construct the current Dataset.
Returns
-------
self : Dataset
Dataset with set reference.
"""
self.set_categorical_feature(reference.categorical_feature) \
.set_feature_name(reference.feature_name) \
._set_predictor(reference._predictor)
        # we're done if self and reference share a common upstream reference
if self.get_ref_chain().intersection(reference.get_ref_chain()):
return self
if self.data is not None:
self.reference = reference
return self._free_handle()
else:
raise LightGBMError("Cannot set reference after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def set_feature_name(self, feature_name):
"""Set feature name.
Parameters
----------
feature_name : list of strings
Feature names.
Returns
-------
self : Dataset
Dataset with set feature name.
"""
if feature_name != 'auto':
self.feature_name = feature_name
if self.handle is not None and feature_name is not None and feature_name != 'auto':
if len(feature_name) != self.num_feature():
raise ValueError("Length of feature_name({}) and num_feature({}) don't match"
.format(len(feature_name), self.num_feature()))
c_feature_name = [c_str(name) for name in feature_name]
_safe_call(_LIB.LGBM_DatasetSetFeatureNames(
self.handle,
c_array(ctypes.c_char_p, c_feature_name),
ctypes.c_int(len(feature_name))))
return self
def set_label(self, label):
"""Set label of Dataset.
Parameters
----------
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None
The label information to be set into Dataset.
Returns
-------
self : Dataset
Dataset with set label.
"""
self.label = label
if self.handle is not None:
label = list_to_1d_numpy(_label_from_pandas(label), name='label')
self.set_field('label', label)
self.label = self.get_field('label') # original values can be modified at cpp side
return self
def set_weight(self, weight):
"""Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point.
Returns
-------
self : Dataset
Dataset with set weight.
"""
if weight is not None and np.all(weight == 1):
weight = None
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
self.weight = self.get_field('weight') # original values can be modified at cpp side
return self
def set_init_score(self, init_score):
"""Set init score of Booster to start from.
Parameters
----------
init_score : list, numpy 1-D array, pandas Series or None
Init score for Booster.
Returns
-------
self : Dataset
Dataset with set init score.
"""
self.init_score = init_score
if self.handle is not None and init_score is not None:
init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
self.set_field('init_score', init_score)
self.init_score = self.get_field('init_score') # original values can be modified at cpp side
return self
def set_group(self, group):
"""Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group size of each group.
Returns
-------
self : Dataset
Dataset with set group.
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
return self
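# Illustrative usage sketch (not part of the original module), assuming synthetic data:
# with group = [3, 2], the first 3 rows form query 1 and the next 2 rows form query 2,
# so the group sizes must sum to the total number of rows.
import numpy as np
import lightgbm as lgb

X = np.random.rand(5, 4)
y = np.array([2, 1, 0, 1, 0])
ranking_data = lgb.Dataset(X, label=y, group=[3, 2])
params = {'objective': 'lambdarank', 'min_data_in_leaf': 1, 'min_data_in_bin': 1, 'verbose': -1}
ranker = lgb.train(params, ranking_data, num_boost_round=5)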
def get_feature_name(self):
"""Get the names of columns (features) in the Dataset.
Returns
-------
feature_names : list
The names of columns (features) in the Dataset.
"""
if self.handle is None:
raise LightGBMError("Cannot get feature_name before construct dataset")
num_feature = self.num_feature()
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for i in range_(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
num_feature,
ctypes.byref(tmp_out_len),
reserved_string_buffer_size,
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
if reserved_string_buffer_size < required_string_buffer_size.value:
raise BufferError(
"Allocated feature name buffer size ({}) was inferior to the needed size ({})."
.format(reserved_string_buffer_size, required_string_buffer_size.value)
)
return [string_buffers[i].value.decode('utf-8') for i in range_(num_feature)]
def get_label(self):
"""Get the label of the Dataset.
Returns
-------
label : numpy array or None
The label information from the Dataset.
"""
if self.label is None:
self.label = self.get_field('label')
return self.label
def get_weight(self):
"""Get the weight of the Dataset.
Returns
-------
weight : numpy array or None
Weight for each data point from the Dataset.
"""
if self.weight is None:
self.weight = self.get_field('weight')
return self.weight
def get_init_score(self):
"""Get the initial score of the Dataset.
Returns
-------
init_score : numpy array or None
Init score of Booster.
"""
if self.init_score is None:
self.init_score = self.get_field('init_score')
return self.init_score
def get_data(self):
"""Get the raw data of the Dataset.
Returns
-------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None
Raw data used in the Dataset construction.
"""
if self.handle is None:
raise Exception("Cannot get data before construct Dataset")
if self.need_slice and self.used_indices is not None and self.reference is not None:
self.data = self.reference.data
if self.data is not None:
if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data):
self.data = self.data[self.used_indices, :]
elif isinstance(self.data, DataFrame):
self.data = self.data.iloc[self.used_indices].copy()
elif isinstance(self.data, DataTable):
self.data = self.data[self.used_indices, :]
else:
warnings.warn("Cannot subset {} type of raw data.\n"
"Returning original raw data".format(type(self.data).__name__))
self.need_slice = False
if self.data is None:
raise LightGBMError("Cannot call `get_data` after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self.data
def get_group(self):
"""Get the group of the Dataset.
Returns
-------
group : numpy array or None
Group size of each group.
"""
if self.group is None:
self.group = self.get_field('group')
if self.group is not None:
# group data from LightGBM is boundaries data, need to convert to group size
self.group = np.diff(self.group)
return self.group
def num_data(self):
"""Get the number of rows in the Dataset.
Returns
-------
number_of_rows : int
The number of rows in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int()
_safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_data before construct dataset")
def num_feature(self):
"""Get the number of columns (features) in the Dataset.
Returns
-------
number_of_columns : int
The number of columns (features) in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int()
_safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_feature before construct dataset")
def get_ref_chain(self, ref_limit=100):
"""Get a chain of Dataset objects.
Starts with r, then goes to r.reference (if exists),
then to r.reference.reference, etc.
until we hit ``ref_limit`` or a reference loop.
Parameters
----------
ref_limit : int, optional (default=100)
The limit number of references.
Returns
-------
ref_chain : set of Dataset
Chain of references of the Datasets.
"""
head = self
ref_chain = set()
while len(ref_chain) < ref_limit:
if isinstance(head, Dataset):
ref_chain.add(head)
if (head.reference is not None) and (head.reference not in ref_chain):
head = head.reference
else:
break
else:
break
return ref_chain
def add_features_from(self, other):
"""Add features from other Dataset to the current Dataset.
Both Datasets must be constructed before calling this method.
Parameters
----------
other : Dataset
The Dataset to take features from.
Returns
-------
self : Dataset
Dataset with the new features added.
"""
if self.handle is None or other.handle is None:
raise ValueError('Both source and target Datasets must be constructed before adding features')
_safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle))
return self
def _dump_text(self, filename):
"""Save Dataset to a text file.
This format cannot be loaded back in by LightGBM, but is useful for debugging purposes.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetDumpText(
self.construct().handle,
c_str(filename)))
return self
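# Illustrative usage sketch (not part of the original module), assuming synthetic data:
# the raw data is dropped after construction unless free_raw_data=False is passed,
# which is why several methods above raise once the raw data has been freed.
import numpy as np
import lightgbm as lgb

X = np.random.rand(100, 10)
y = np.random.randint(0, 2, size=100)
train_data = lgb.Dataset(X, label=y, free_raw_data=False)
train_data.construct()
print(train_data.num_data(), train_data.num_feature())  # 100 10
print(train_data.get_data().shape)  # still available because free_raw_data=False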
class Booster(object):
"""Booster in LightGBM."""
def __init__(self, params=None, train_set=None, model_file=None, model_str=None, silent=False):
"""Initialize the Booster.
Parameters
----------
params : dict or None, optional (default=None)
Parameters for Booster.
train_set : Dataset or None, optional (default=None)
Training dataset.
model_file : string or None, optional (default=None)
Path to the model file.
model_str : string or None, optional (default=None)
Model will be loaded from this string.
silent : bool, optional (default=False)
            Whether to suppress printing messages during construction.
"""
self.handle = None
self.network = False
self.__need_reload_eval_info = True
self._train_data_name = "training"
self.__attr = {}
self.__set_objective_to_none = False
self.best_iteration = -1
self.best_score = {}
params = {} if params is None else copy.deepcopy(params)
# user can set verbose with params, it has higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
if train_set is not None:
# Training task
if not isinstance(train_set, Dataset):
                raise TypeError('Training data should be Dataset instance, got {}'
.format(type(train_set).__name__))
# set network if necessary
for alias in _ConfigAliases.get("machines"):
if alias in params:
machines = params[alias]
if isinstance(machines, string_type):
num_machines = len(machines.split(','))
elif isinstance(machines, (list, set)):
num_machines = len(machines)
machines = ','.join(machines)
else:
raise ValueError("Invalid machines in params.")
self.set_network(machines,
local_listen_port=params.get("local_listen_port", 12400),
listen_time_out=params.get("listen_time_out", 120),
num_machines=params.setdefault("num_machines", num_machines))
break
# construct booster object
train_set.construct()
# copy the parameters from train_set
params.update(train_set.get_params())
params_str = param_dict_to_str(params)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreate(
train_set.handle,
c_str(params_str),
ctypes.byref(self.handle)))
# save reference to data
self.train_set = train_set
self.valid_sets = []
self.name_valid_sets = []
self.__num_dataset = 1
self.__init_predictor = train_set._predictor
if self.__init_predictor is not None:
_safe_call(_LIB.LGBM_BoosterMerge(
self.handle,
self.__init_predictor.handle))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
# buffer for inner predict
self.__inner_predict_buffer = [None]
self.__is_predicted_cur_iter = [False]
self.__get_eval_info()
self.pandas_categorical = train_set.pandas_categorical
self.train_set_version = train_set.version
elif model_file is not None:
# Prediction task
out_num_iterations = ctypes.c_int(0)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif model_str is not None:
self.model_from_string(model_str, not silent)
else:
raise TypeError('Need at least one training dataset or model file or model string '
'to create Booster instance')
self.params = params
def __del__(self):
try:
if self.network:
self.free_network()
except AttributeError:
pass
try:
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
model_str = self.model_to_string(num_iteration=-1)
booster = Booster(model_str=model_str)
return booster
def __getstate__(self):
this = self.__dict__.copy()
handle = this['handle']
this.pop('train_set', None)
this.pop('valid_sets', None)
if handle is not None:
this["handle"] = self.model_to_string(num_iteration=-1)
return this
def __setstate__(self, state):
model_str = state.get('handle', None)
if model_str is not None:
handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(handle)))
state['handle'] = handle
self.__dict__.update(state)
def free_dataset(self):
"""Free Booster's Datasets.
Returns
-------
self : Booster
Booster without Datasets.
"""
self.__dict__.pop('train_set', None)
self.__dict__.pop('valid_sets', None)
self.__num_dataset = 0
return self
def _free_buffer(self):
self.__inner_predict_buffer = []
self.__is_predicted_cur_iter = []
return self
def set_network(self, machines, local_listen_port=12400,
listen_time_out=120, num_machines=1):
"""Set the network configuration.
Parameters
----------
machines : list, set or string
Names of machines.
local_listen_port : int, optional (default=12400)
TCP listen port for local machines.
listen_time_out : int, optional (default=120)
Socket time-out in minutes.
num_machines : int, optional (default=1)
The number of machines for parallel learning application.
Returns
-------
self : Booster
Booster with set network.
"""
_safe_call(_LIB.LGBM_NetworkInit(c_str(machines),
ctypes.c_int(local_listen_port),
ctypes.c_int(listen_time_out),
ctypes.c_int(num_machines)))
self.network = True
return self
def free_network(self):
"""Free Booster's network.
Returns
-------
self : Booster
Booster with freed network.
"""
_safe_call(_LIB.LGBM_NetworkFree())
self.network = False
return self
def trees_to_dataframe(self):
"""Parse the fitted model and return in an easy-to-read pandas DataFrame.
Returns
-------
result : pandas DataFrame
Returns a pandas DataFrame of the parsed model.
"""
if not PANDAS_INSTALLED:
raise LightGBMError('This method cannot be run without pandas installed')
if self.num_trees() == 0:
raise LightGBMError('There are no trees in this Booster and thus nothing to parse')
def _is_split_node(tree):
return 'split_index' in tree.keys()
def create_node_record(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
def _get_node_index(tree, tree_index):
tree_num = str(tree_index) + '-' if tree_index is not None else ''
is_split = _is_split_node(tree)
node_type = 'S' if is_split else 'L'
                # a single-node tree won't have `leaf_index`, so default to 0
node_num = str(tree.get('split_index' if is_split else 'leaf_index', 0))
return tree_num + node_type + node_num
def _get_split_feature(tree, feature_names):
if _is_split_node(tree):
if feature_names is not None:
feature_name = feature_names[tree['split_feature']]
else:
feature_name = tree['split_feature']
else:
feature_name = None
return feature_name
def _is_single_node_tree(tree):
return set(tree.keys()) == {'leaf_value'}
# Create the node record, and populate universal data members
node = OrderedDict()
node['tree_index'] = tree_index
node['node_depth'] = node_depth
node['node_index'] = _get_node_index(tree, tree_index)
node['left_child'] = None
node['right_child'] = None
node['parent_index'] = parent_node
node['split_feature'] = _get_split_feature(tree, feature_names)
node['split_gain'] = None
node['threshold'] = None
node['decision_type'] = None
node['missing_direction'] = None
node['missing_type'] = None
node['value'] = None
node['weight'] = None
node['count'] = None
# Update values to reflect node type (leaf or split)
if _is_split_node(tree):
node['left_child'] = _get_node_index(tree['left_child'], tree_index)
node['right_child'] = _get_node_index(tree['right_child'], tree_index)
node['split_gain'] = tree['split_gain']
node['threshold'] = tree['threshold']
node['decision_type'] = tree['decision_type']
node['missing_direction'] = 'left' if tree['default_left'] else 'right'
node['missing_type'] = tree['missing_type']
node['value'] = tree['internal_value']
node['weight'] = tree['internal_weight']
node['count'] = tree['internal_count']
else:
node['value'] = tree['leaf_value']
if not _is_single_node_tree(tree):
node['weight'] = tree['leaf_weight']
node['count'] = tree['leaf_count']
return node
def tree_dict_to_node_list(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
node = create_node_record(tree,
node_depth=node_depth,
tree_index=tree_index,
feature_names=feature_names,
parent_node=parent_node)
res = [node]
if _is_split_node(tree):
# traverse the next level of the tree
children = ['left_child', 'right_child']
for child in children:
subtree_list = tree_dict_to_node_list(
tree[child],
node_depth=node_depth + 1,
tree_index=tree_index,
feature_names=feature_names,
parent_node=node['node_index'])
                # each child subtree is returned as a list of node records (dicts),
                # which we append to the overall result.
res.extend(subtree_list)
return res
model_dict = self.dump_model()
feature_names = model_dict['feature_names']
model_list = []
for tree in model_dict['tree_info']:
model_list.extend(tree_dict_to_node_list(tree['tree_structure'],
tree_index=tree['tree_index'],
feature_names=feature_names))
return DataFrame(model_list, columns=model_list[0].keys())
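# Illustrative usage sketch (not part of the original module), assuming synthetic data
# and pandas installed: each row of the returned DataFrame is one node of one tree.
import numpy as np
import lightgbm as lgb

X, y = np.random.rand(200, 5), np.random.rand(200)
bst = lgb.train({'objective': 'regression', 'verbose': -1}, lgb.Dataset(X, label=y), num_boost_round=3)
tree_df = bst.trees_to_dataframe()
print(tree_df[['tree_index', 'node_index', 'split_feature', 'threshold', 'value']].head())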
def set_train_data_name(self, name):
"""Set the name to the training Dataset.
Parameters
----------
name : string
Name for the training Dataset.
Returns
-------
self : Booster
Booster with set training Dataset name.
"""
self._train_data_name = name
return self
def add_valid(self, data, name):
"""Add validation data.
Parameters
----------
data : Dataset
Validation data.
name : string
Name of validation data.
Returns
-------
self : Booster
Booster with set validation data.
"""
if not isinstance(data, Dataset):
            raise TypeError('Validation data should be Dataset instance, got {}'
.format(type(data).__name__))
if data._predictor is not self.__init_predictor:
raise LightGBMError("Add validation data failed, "
"you should use same predictor for these data")
_safe_call(_LIB.LGBM_BoosterAddValidData(
self.handle,
data.construct().handle))
self.valid_sets.append(data)
self.name_valid_sets.append(name)
self.__num_dataset += 1
self.__inner_predict_buffer.append(None)
self.__is_predicted_cur_iter.append(False)
return self
def reset_parameter(self, params):
"""Reset parameters of Booster.
Parameters
----------
params : dict
New parameters for Booster.
Returns
-------
self : Booster
Booster with new parameters.
"""
params_str = param_dict_to_str(params)
if params_str:
_safe_call(_LIB.LGBM_BoosterResetParameter(
self.handle,
c_str(params_str)))
self.params.update(params)
return self
def update(self, train_set=None, fobj=None):
"""Update Booster for one iteration.
Parameters
----------
train_set : Dataset or None, optional (default=None)
Training data.
If None, last training data is used.
fobj : callable or None, optional (default=None)
Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) for each sample point.
            For binary task, preds are the probabilities of the positive class (or margins when ``fobj`` is specified).
            For multi-class task, preds are grouped by class_id first, then by row_id.
            To access the prediction for the i-th row and j-th class, use preds[j * num_data + i],
            and grad and hess should be grouped in the same way.
Returns
-------
is_finished : bool
Whether the update was successfully finished.
"""
        # need to reset the training data
if train_set is None and self.train_set_version != self.train_set.version:
train_set = self.train_set
is_the_same_train_set = False
else:
is_the_same_train_set = train_set is self.train_set and self.train_set_version == train_set.version
if train_set is not None and not is_the_same_train_set:
if not isinstance(train_set, Dataset):
                raise TypeError('Training data should be Dataset instance, got {}'
.format(type(train_set).__name__))
if train_set._predictor is not self.__init_predictor:
raise LightGBMError("Replace training data failed, "
"you should use same predictor for these data")
self.train_set = train_set
_safe_call(_LIB.LGBM_BoosterResetTrainingData(
self.handle,
self.train_set.construct().handle))
self.__inner_predict_buffer[0] = None
self.train_set_version = self.train_set.version
is_finished = ctypes.c_int(0)
if fobj is None:
if self.__set_objective_to_none:
raise LightGBMError('Cannot update due to null objective function.')
_safe_call(_LIB.LGBM_BoosterUpdateOneIter(
self.handle,
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return is_finished.value == 1
else:
if not self.__set_objective_to_none:
self.reset_parameter({"objective": "none"}).__set_objective_to_none = True
grad, hess = fobj(self.__inner_predict(0), self.train_set)
return self.__boost(grad, hess)
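# Illustrative usage sketch (not part of the original module), assuming synthetic data:
# a custom objective passed as `fobj` must return (grad, hess), one value per sample
# for a single-output task, as described in the docstring above.
import numpy as np
import lightgbm as lgb

def l2_objective(preds, train_data):
    # gradient and hessian of 0.5 * (preds - label) ** 2
    labels = train_data.get_label()
    return preds - labels, np.ones_like(preds)

X, y = np.random.rand(200, 5), np.random.rand(200)
bst = lgb.train({'verbose': -1}, lgb.Dataset(X, label=y), num_boost_round=10, fobj=l2_objective)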
def __boost(self, grad, hess):
"""Boost Booster for one iteration with customized gradient statistics.
.. note::
            For binary task, the score is the probability of the positive class (or the margin in case of custom objective).
            For multi-class task, scores are grouped by class_id first, then by row_id.
            To access the score for the i-th row and j-th class, use score[j * num_data + i],
            and grad and hess should be grouped in the same way.
Parameters
----------
grad : list or numpy 1-D array
The first order derivative (gradient).
hess : list or numpy 1-D array
The second order derivative (Hessian).
Returns
-------
is_finished : bool
Whether the boost was successfully finished.
"""
grad = list_to_1d_numpy(grad, name='gradient')
hess = list_to_1d_numpy(hess, name='hessian')
assert grad.flags.c_contiguous
assert hess.flags.c_contiguous
if len(grad) != len(hess):
raise ValueError("Lengths of gradient({}) and hessian({}) don't match"
.format(len(grad), len(hess)))
is_finished = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
self.handle,
grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return is_finished.value == 1
def rollback_one_iter(self):
"""Rollback one iteration.
Returns
-------
self : Booster
Booster with rolled back one iteration.
"""
_safe_call(_LIB.LGBM_BoosterRollbackOneIter(
self.handle))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return self
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
def num_model_per_iteration(self):
"""Get number of models per iteration.
Returns
-------
model_per_iter : int
The number of models per iteration.
"""
model_per_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumModelPerIteration(
self.handle,
ctypes.byref(model_per_iter)))
return model_per_iter.value
def num_trees(self):
"""Get number of weak sub-models.
Returns
-------
num_trees : int
The number of weak sub-models.
"""
num_trees = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumberOfTotalModel(
self.handle,
ctypes.byref(num_trees)))
return num_trees.value
def upper_bound(self):
"""Get upper bound value of a model.
Returns
-------
upper_bound : double
Upper bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetUpperBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def lower_bound(self):
"""Get lower bound value of a model.
Returns
-------
lower_bound : double
Lower bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLowerBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def eval(self, data, name, feval=None):
"""Evaluate for data.
Parameters
----------
data : Dataset
Data for the evaluating.
name : string
Name of the data.
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, eval_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
eval_data : Dataset
The evaluation dataset.
eval_name : string
The name of evaluation function (without whitespaces).
eval_result : float
The eval result.
is_higher_better : bool
                Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
            For binary task, preds are the probabilities of the positive class (or margins when ``fobj`` is specified).
            For multi-class task, preds are grouped by class_id first, then by row_id.
            To access the prediction for the i-th row and j-th class, use preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
if not isinstance(data, Dataset):
raise TypeError("Can only eval for Dataset instance")
data_idx = -1
if data is self.train_set:
data_idx = 0
else:
for i in range_(len(self.valid_sets)):
if data is self.valid_sets[i]:
data_idx = i + 1
break
# need to push new valid data
if data_idx == -1:
self.add_valid(data, name)
data_idx = self.__num_dataset - 1
return self.__inner_eval(name, data_idx, feval)
def eval_train(self, feval=None):
"""Evaluate for training data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function (without whitespaces).
eval_result : float
The eval result.
is_higher_better : bool
                Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
            For binary task, preds are the probabilities of the positive class (or margins when ``fobj`` is specified).
            For multi-class task, preds are grouped by class_id first, then by row_id.
            To access the prediction for the i-th row and j-th class, use preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return self.__inner_eval(self._train_data_name, 0, feval)
def eval_valid(self, feval=None):
"""Evaluate for validation data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, valid_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
valid_data : Dataset
The validation dataset.
eval_name : string
The name of evaluation function (without whitespaces).
eval_result : float
The eval result.
is_higher_better : bool
                Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
            For binary task, preds are the probabilities of the positive class (or margins when ``fobj`` is specified).
            For multi-class task, preds are grouped by class_id first, then by row_id.
            To access the prediction for the i-th row and j-th class, use preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return [item for i in range_(1, self.__num_dataset)
for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
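# Illustrative usage sketch (not part of the original module), assuming synthetic data:
# a custom metric passed as `feval` must return (eval_name, eval_result, is_higher_better).
import numpy as np
import lightgbm as lgb

def mean_absolute_error(preds, eval_data):
    labels = eval_data.get_label()
    return 'mae', float(np.mean(np.abs(preds - labels))), False  # lower is better

X, y = np.random.rand(300, 5), np.random.rand(300)
dtrain = lgb.Dataset(X[:200], label=y[:200])
dvalid = lgb.Dataset(X[200:], label=y[200:], reference=dtrain)
bst = lgb.train({'objective': 'regression', 'verbose': -1}, dtrain,
                num_boost_round=10, valid_sets=[dvalid], feval=mean_absolute_error)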
def save_model(self, filename, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to file.
Parameters
----------
filename : string
Filename to save Booster.
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : string, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
self : Booster
Returns self.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
_safe_call(_LIB.LGBM_BoosterSaveModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
c_str(filename)))
_dump_pandas_categorical(self.pandas_categorical, filename)
return self
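# Illustrative usage sketch (not part of the original module), assuming synthetic data
# and an arbitrary file name: save a trained model and reload it for prediction only.
import numpy as np
import lightgbm as lgb

X, y = np.random.rand(200, 5), np.random.rand(200)
bst = lgb.train({'objective': 'regression', 'verbose': -1}, lgb.Dataset(X, label=y), num_boost_round=5)
bst.save_model('model.txt')
reloaded = lgb.Booster(model_file='model.txt')  # prediction-only Booster, as in __init__ above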
def shuffle_models(self, start_iteration=0, end_iteration=-1):
"""Shuffle models.
Parameters
----------
start_iteration : int, optional (default=0)
The first iteration that will be shuffled.
end_iteration : int, optional (default=-1)
The last iteration that will be shuffled.
If <= 0, means the last available iteration.
Returns
-------
self : Booster
Booster with shuffled models.
"""
_safe_call(_LIB.LGBM_BoosterShuffleModels(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(end_iteration)))
return self
def model_from_string(self, model_str, verbose=True):
"""Load Booster from a string.
Parameters
----------
model_str : string
Model will be loaded from this string.
verbose : bool, optional (default=True)
Whether to print messages while loading model.
Returns
-------
self : Booster
Loaded Booster object.
"""
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
self._free_buffer()
self.handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
if verbose:
print('Finished loading model, total used %d iterations' % int(out_num_iterations.value))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
return self
def model_to_string(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to string.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : string, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
str_repr : string
String representation of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, re-allocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = string_buffer.value.decode('utf-8')
ret += _dump_pandas_categorical(self.pandas_categorical)
return ret
def dump_model(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Dump Booster to JSON format.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be dumped.
If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
If <= 0, all iterations are dumped.
start_iteration : int, optional (default=0)
Start index of the iteration that should be dumped.
importance_type : string, optional (default="split")
What type of feature importance should be dumped.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
json_repr : dict
JSON format of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, reallocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = json.loads(string_buffer.value.decode('utf-8'))
ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
default=json_default_with_numpy))
return ret
def predict(self, data, start_iteration=0, num_iteration=None,
raw_score=False, pred_leaf=False, pred_contrib=False,
data_has_header=False, is_reshape=True, **kwargs):
"""Make a prediction.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
If string, it represents the path to txt file.
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
If <= 0, starts from the first iteration.
num_iteration : int or None, optional (default=None)
Total number of iterations used in the prediction.
If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
otherwise, all iterations from ``start_iteration`` are used (no limits).
If <= 0, all iterations from ``start_iteration`` are used (no limits).
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
.. note::
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
data_has_header : bool, optional (default=False)
Whether the data has header.
Used only if data is string.
is_reshape : bool, optional (default=True)
If True, result is reshaped to [nrow, ncol].
**kwargs
Other parameters for the prediction.
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
predictor = self._to_predictor(copy.deepcopy(kwargs))
if num_iteration is None:
if start_iteration <= 0:
num_iteration = self.best_iteration
else:
num_iteration = -1
return predictor.predict(data, start_iteration, num_iteration,
raw_score, pred_leaf, pred_contrib,
data_has_header, is_reshape)
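# Illustrative usage sketch (not part of the original module), assuming synthetic
# binary-classification data: the main prediction variants described above.
import numpy as np
import lightgbm as lgb

X, y = np.random.rand(300, 5), np.random.randint(0, 2, size=300)
bst = lgb.train({'objective': 'binary', 'verbose': -1}, lgb.Dataset(X, label=y), num_boost_round=10)
X_new = np.random.rand(10, 5)
proba = bst.predict(X_new)                        # probabilities of the positive class
raw = bst.predict(X_new, raw_score=True)          # raw margin scores
leaves = bst.predict(X_new, pred_leaf=True)       # [nrow, num_trees] leaf indices
contribs = bst.predict(X_new, pred_contrib=True)  # [nrow, num_features + 1]; last column is the expected value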
def refit(self, data, label, decay_rate=0.9, **kwargs):
"""Refit the existing Booster by new data.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for refit.
If string, it represents the path to txt file.
label : list, numpy 1-D array or pandas Series / one-column DataFrame
Label for refit.
decay_rate : float, optional (default=0.9)
Decay rate of refit,
will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees.
**kwargs
Other parameters for refit.
These parameters will be passed to ``predict`` method.
Returns
-------
result : Booster
Refitted Booster.
"""
if self.__set_objective_to_none:
raise LightGBMError('Cannot refit due to null objective function.')
predictor = self._to_predictor(copy.deepcopy(kwargs))
leaf_preds = predictor.predict(data, -1, pred_leaf=True)
nrow, ncol = leaf_preds.shape
train_set = Dataset(data, label, silent=True)
new_params = copy.deepcopy(self.params)
new_params['refit_decay_rate'] = decay_rate
new_booster = Booster(new_params, train_set)
# Copy models
_safe_call(_LIB.LGBM_BoosterMerge(
new_booster.handle,
predictor.handle))
leaf_preds = leaf_preds.reshape(-1)
ptr_data, type_ptr_data, _ = c_int_array(leaf_preds)
_safe_call(_LIB.LGBM_BoosterRefit(
new_booster.handle,
ptr_data,
ctypes.c_int(nrow),
ctypes.c_int(ncol)))
new_booster.network = self.network
new_booster.__attr = self.__attr.copy()
return new_booster
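# Illustrative usage sketch (not part of the original module), assuming synthetic data:
# refit keeps the tree structures and only blends the leaf outputs using decay_rate.
import numpy as np
import lightgbm as lgb

X, y = np.random.rand(300, 5), np.random.rand(300)
bst = lgb.train({'objective': 'regression', 'verbose': -1}, lgb.Dataset(X, label=y), num_boost_round=10)
X_new, y_new = np.random.rand(100, 5), np.random.rand(100)
refitted = bst.refit(X_new, y_new, decay_rate=0.9)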
def get_leaf_output(self, tree_id, leaf_id):
"""Get the output of a leaf.
Parameters
----------
tree_id : int
The index of the tree.
leaf_id : int
The index of the leaf in the tree.
Returns
-------
result : float
The output of the leaf.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLeafValue(
self.handle,
ctypes.c_int(tree_id),
ctypes.c_int(leaf_id),
ctypes.byref(ret)))
return ret.value
def _to_predictor(self, pred_parameter=None):
"""Convert to predictor."""
predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter)
predictor.pandas_categorical = self.pandas_categorical
return predictor
def num_feature(self):
"""Get number of features.
Returns
-------
num_feature : int
The number of features.
"""
out_num_feature = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumFeature(
self.handle,
ctypes.byref(out_num_feature)))
return out_num_feature.value
def feature_name(self):
"""Get names of features.
Returns
-------
result : list
List with names of features.
"""
num_feature = self.num_feature()
# Get name of features
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for i in range_(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
num_feature,
ctypes.byref(tmp_out_len),
reserved_string_buffer_size,
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
if reserved_string_buffer_size < required_string_buffer_size.value:
raise BufferError(
"Allocated feature name buffer size ({}) was inferior to the needed size ({})."
.format(reserved_string_buffer_size, required_string_buffer_size.value)
)
return [string_buffers[i].value.decode('utf-8') for i in range_(num_feature)]
def feature_importance(self, importance_type='split', iteration=None):
"""Get feature importances.
Parameters
----------
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
iteration : int or None, optional (default=None)
Limit number of iterations in the feature importance calculation.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
Returns
-------
result : numpy array
Array with feature importances.
"""
if iteration is None:
iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
result = np.zeros(self.num_feature(), dtype=np.float64)
_safe_call(_LIB.LGBM_BoosterFeatureImportance(
self.handle,
ctypes.c_int(iteration),
ctypes.c_int(importance_type_int),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if importance_type_int == 0:
return result.astype(np.int32)
else:
return result
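# Illustrative usage sketch (not part of the original module), assuming synthetic data:
# "split" counts how often each feature is used, "gain" sums the gains of its splits.
import numpy as np
import lightgbm as lgb

X, y = np.random.rand(300, 5), np.random.rand(300)
bst = lgb.train({'objective': 'regression', 'verbose': -1}, lgb.Dataset(X, label=y), num_boost_round=10)
split_importance = bst.feature_importance(importance_type='split')  # int32 counts
gain_importance = bst.feature_importance(importance_type='gain')    # float64 totals
for name, n_splits, gain in zip(bst.feature_name(), split_importance, gain_importance):
    print(name, n_splits, gain)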
def get_split_value_histogram(self, feature, bins=None, xgboost_style=False):
"""Get split value histogram for the specified feature.
Parameters
----------
feature : int or string
The feature name or index the histogram is calculated for.
If int, interpreted as index.
If string, interpreted as name.
.. warning::
Categorical features are not supported.
bins : int, string or None, optional (default=None)
The maximum number of bins.
If None, or int and > number of unique split values and ``xgboost_style=True``,
the number of bins equals number of unique split values.
If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.
xgboost_style : bool, optional (default=False)
Whether the returned result should be in the same form as it is in XGBoost.
If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function.
If True, the returned value is matrix, in which the first column is the right edges of non-empty bins
and the second one is the histogram values.
Returns
-------
result_tuple : tuple of 2 numpy arrays
If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature
and the bin edges.
result_array_like : numpy array or pandas DataFrame (if pandas is installed)
If ``xgboost_style=True``, the histogram of used splitting values for the specified feature.
"""
def add(root):
"""Recursively add thresholds."""
if 'split_index' in root: # non-leaf
if feature_names is not None and isinstance(feature, string_type):
split_feature = feature_names[root['split_feature']]
else:
split_feature = root['split_feature']
if split_feature == feature:
if isinstance(root['threshold'], string_type):
raise LightGBMError('Cannot compute split value histogram for the categorical feature')
else:
values.append(root['threshold'])
add(root['left_child'])
add(root['right_child'])
model = self.dump_model()
feature_names = model.get('feature_names')
tree_infos = model['tree_info']
values = []
for tree_info in tree_infos:
add(tree_info['tree_structure'])
if bins is None or isinstance(bins, integer_types) and xgboost_style:
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
hist, bin_edges = np.histogram(values, bins=bins)
if xgboost_style:
ret = np.column_stack((bin_edges[1:], hist))
ret = ret[ret[:, 1] > 0]
if PANDAS_INSTALLED:
return DataFrame(ret, columns=['SplitValue', 'Count'])
else:
return ret
else:
return hist, bin_edges
def __inner_eval(self, data_name, data_idx, feval=None):
"""Evaluate training or validation data."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
self.__get_eval_info()
ret = []
if self.__num_inner_eval > 0:
result = np.zeros(self.__num_inner_eval, dtype=np.float64)
tmp_out_len = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetEval(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if tmp_out_len.value != self.__num_inner_eval:
raise ValueError("Wrong length of eval results")
for i in range_(self.__num_inner_eval):
ret.append((data_name, self.__name_inner_eval[i],
result[i], self.__higher_better_inner_eval[i]))
if feval is not None:
if data_idx == 0:
cur_data = self.train_set
else:
cur_data = self.valid_sets[data_idx - 1]
feval_ret = feval(self.__inner_predict(data_idx), cur_data)
if isinstance(feval_ret, list):
for eval_name, val, is_higher_better in feval_ret:
ret.append((data_name, eval_name, val, is_higher_better))
else:
eval_name, val, is_higher_better = feval_ret
ret.append((data_name, eval_name, val, is_higher_better))
return ret
def __inner_predict(self, data_idx):
"""Predict for training and validation dataset."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
if self.__inner_predict_buffer[data_idx] is None:
if data_idx == 0:
n_preds = self.train_set.num_data() * self.__num_class
else:
n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
self.__inner_predict_buffer[data_idx] = np.zeros(n_preds, dtype=np.float64)
        # avoid predicting multiple times in one iteration
if not self.__is_predicted_cur_iter[data_idx]:
tmp_out_len = ctypes.c_int64(0)
data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double))
_safe_call(_LIB.LGBM_BoosterGetPredict(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
data_ptr))
if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
raise ValueError("Wrong length of predict results for data %d" % (data_idx))
self.__is_predicted_cur_iter[data_idx] = True
return self.__inner_predict_buffer[data_idx]
def __get_eval_info(self):
"""Get inner evaluation count and names."""
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
out_num_eval = ctypes.c_int(0)
# Get num of inner evals
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
# Get name of evals
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [
ctypes.create_string_buffer(reserved_string_buffer_size) for i in range_(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
self.__num_inner_eval,
ctypes.byref(tmp_out_len),
reserved_string_buffer_size,
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
if reserved_string_buffer_size < required_string_buffer_size.value:
raise BufferError(
"Allocated eval name buffer size ({}) was inferior to the needed size ({})."
.format(reserved_string_buffer_size, required_string_buffer_size.value)
)
self.__name_inner_eval = \
[string_buffers[i].value.decode('utf-8') for i in range_(self.__num_inner_eval)]
self.__higher_better_inner_eval = \
[name.startswith(('auc', 'ndcg@', 'map@')) for name in self.__name_inner_eval]
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : string
The name of the attribute.
Returns
-------
value : string or None
The attribute value.
Returns None if attribute does not exist.
"""
return self.__attr.get(key, None)
def set_attr(self, **kwargs):
"""Set attributes to the Booster.
Parameters
----------
**kwargs
The attributes to set.
Setting a value to None deletes an attribute.
Returns
-------
self : Booster
Booster with set attributes.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, string_type):
raise ValueError("Only string values are accepted")
self.__attr[key] = value
else:
self.__attr.pop(key, None)
return self
| 1 | 25,339 | Please move this to L3115 to group the code logically - all code for `feval` in one place. | microsoft-LightGBM | cpp |
@@ -103,7 +103,7 @@ import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
/** Provides EVMs supporting the appropriate operations for mainnet hard forks. */
-abstract class MainnetEvmRegistries {
+public abstract class MainnetEvmRegistries {
static EVM frontier(final GasCalculator gasCalculator) {
final OperationRegistry registry = new OperationRegistry(); | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.mainnet;
import org.hyperledger.besu.ethereum.core.Account;
import org.hyperledger.besu.ethereum.vm.EVM;
import org.hyperledger.besu.ethereum.vm.GasCalculator;
import org.hyperledger.besu.ethereum.vm.OperationRegistry;
import org.hyperledger.besu.ethereum.vm.operations.AddModOperation;
import org.hyperledger.besu.ethereum.vm.operations.AddOperation;
import org.hyperledger.besu.ethereum.vm.operations.AddressOperation;
import org.hyperledger.besu.ethereum.vm.operations.AndOperation;
import org.hyperledger.besu.ethereum.vm.operations.BalanceOperation;
import org.hyperledger.besu.ethereum.vm.operations.BaseFeeOperation;
import org.hyperledger.besu.ethereum.vm.operations.BlockHashOperation;
import org.hyperledger.besu.ethereum.vm.operations.ByteOperation;
import org.hyperledger.besu.ethereum.vm.operations.CallCodeOperation;
import org.hyperledger.besu.ethereum.vm.operations.CallDataCopyOperation;
import org.hyperledger.besu.ethereum.vm.operations.CallDataLoadOperation;
import org.hyperledger.besu.ethereum.vm.operations.CallDataSizeOperation;
import org.hyperledger.besu.ethereum.vm.operations.CallOperation;
import org.hyperledger.besu.ethereum.vm.operations.CallValueOperation;
import org.hyperledger.besu.ethereum.vm.operations.CallerOperation;
import org.hyperledger.besu.ethereum.vm.operations.ChainIdOperation;
import org.hyperledger.besu.ethereum.vm.operations.CodeCopyOperation;
import org.hyperledger.besu.ethereum.vm.operations.CodeSizeOperation;
import org.hyperledger.besu.ethereum.vm.operations.CoinbaseOperation;
import org.hyperledger.besu.ethereum.vm.operations.Create2Operation;
import org.hyperledger.besu.ethereum.vm.operations.CreateOperation;
import org.hyperledger.besu.ethereum.vm.operations.DelegateCallOperation;
import org.hyperledger.besu.ethereum.vm.operations.DifficultyOperation;
import org.hyperledger.besu.ethereum.vm.operations.DivOperation;
import org.hyperledger.besu.ethereum.vm.operations.DupOperation;
import org.hyperledger.besu.ethereum.vm.operations.EqOperation;
import org.hyperledger.besu.ethereum.vm.operations.ExpOperation;
import org.hyperledger.besu.ethereum.vm.operations.ExtCodeCopyOperation;
import org.hyperledger.besu.ethereum.vm.operations.ExtCodeHashOperation;
import org.hyperledger.besu.ethereum.vm.operations.ExtCodeSizeOperation;
import org.hyperledger.besu.ethereum.vm.operations.GasLimitOperation;
import org.hyperledger.besu.ethereum.vm.operations.GasOperation;
import org.hyperledger.besu.ethereum.vm.operations.GasPriceOperation;
import org.hyperledger.besu.ethereum.vm.operations.GtOperation;
import org.hyperledger.besu.ethereum.vm.operations.InvalidOperation;
import org.hyperledger.besu.ethereum.vm.operations.IsZeroOperation;
import org.hyperledger.besu.ethereum.vm.operations.JumpDestOperation;
import org.hyperledger.besu.ethereum.vm.operations.JumpOperation;
import org.hyperledger.besu.ethereum.vm.operations.JumpiOperation;
import org.hyperledger.besu.ethereum.vm.operations.LogOperation;
import org.hyperledger.besu.ethereum.vm.operations.LtOperation;
import org.hyperledger.besu.ethereum.vm.operations.MLoadOperation;
import org.hyperledger.besu.ethereum.vm.operations.MSizeOperation;
import org.hyperledger.besu.ethereum.vm.operations.MStore8Operation;
import org.hyperledger.besu.ethereum.vm.operations.MStoreOperation;
import org.hyperledger.besu.ethereum.vm.operations.ModOperation;
import org.hyperledger.besu.ethereum.vm.operations.MulModOperation;
import org.hyperledger.besu.ethereum.vm.operations.MulOperation;
import org.hyperledger.besu.ethereum.vm.operations.NotOperation;
import org.hyperledger.besu.ethereum.vm.operations.NumberOperation;
import org.hyperledger.besu.ethereum.vm.operations.OrOperation;
import org.hyperledger.besu.ethereum.vm.operations.OriginOperation;
import org.hyperledger.besu.ethereum.vm.operations.PCOperation;
import org.hyperledger.besu.ethereum.vm.operations.PopOperation;
import org.hyperledger.besu.ethereum.vm.operations.PushOperation;
import org.hyperledger.besu.ethereum.vm.operations.ReturnDataCopyOperation;
import org.hyperledger.besu.ethereum.vm.operations.ReturnDataSizeOperation;
import org.hyperledger.besu.ethereum.vm.operations.ReturnOperation;
import org.hyperledger.besu.ethereum.vm.operations.RevertOperation;
import org.hyperledger.besu.ethereum.vm.operations.SDivOperation;
import org.hyperledger.besu.ethereum.vm.operations.SGtOperation;
import org.hyperledger.besu.ethereum.vm.operations.SLoadOperation;
import org.hyperledger.besu.ethereum.vm.operations.SLtOperation;
import org.hyperledger.besu.ethereum.vm.operations.SModOperation;
import org.hyperledger.besu.ethereum.vm.operations.SStoreOperation;
import org.hyperledger.besu.ethereum.vm.operations.SarOperation;
import org.hyperledger.besu.ethereum.vm.operations.SelfBalanceOperation;
import org.hyperledger.besu.ethereum.vm.operations.SelfDestructOperation;
import org.hyperledger.besu.ethereum.vm.operations.Sha3Operation;
import org.hyperledger.besu.ethereum.vm.operations.ShlOperation;
import org.hyperledger.besu.ethereum.vm.operations.ShrOperation;
import org.hyperledger.besu.ethereum.vm.operations.SignExtendOperation;
import org.hyperledger.besu.ethereum.vm.operations.StaticCallOperation;
import org.hyperledger.besu.ethereum.vm.operations.StopOperation;
import org.hyperledger.besu.ethereum.vm.operations.SubOperation;
import org.hyperledger.besu.ethereum.vm.operations.SwapOperation;
import org.hyperledger.besu.ethereum.vm.operations.TimestampOperation;
import org.hyperledger.besu.ethereum.vm.operations.XorOperation;
import java.math.BigInteger;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
/** Provides EVMs supporting the appropriate operations for mainnet hard forks. */
abstract class MainnetEvmRegistries {
static EVM frontier(final GasCalculator gasCalculator) {
final OperationRegistry registry = new OperationRegistry();
registerFrontierOpcodes(registry, gasCalculator, Account.DEFAULT_VERSION);
return new EVM(registry, gasCalculator);
}
static EVM homestead(final GasCalculator gasCalculator) {
final OperationRegistry registry = new OperationRegistry();
registerHomesteadOpcodes(registry, gasCalculator, Account.DEFAULT_VERSION);
return new EVM(registry, gasCalculator);
}
static EVM byzantium(final GasCalculator gasCalculator) {
final OperationRegistry registry = new OperationRegistry();
registerByzantiumOpcodes(registry, gasCalculator, Account.DEFAULT_VERSION);
return new EVM(registry, gasCalculator);
}
static EVM constantinople(final GasCalculator gasCalculator) {
final OperationRegistry registry = new OperationRegistry();
registerConstantinopleOpcodes(registry, gasCalculator, Account.DEFAULT_VERSION);
return new EVM(registry, gasCalculator);
}
static EVM istanbul(final GasCalculator gasCalculator, final BigInteger chainId) {
final OperationRegistry registry = new OperationRegistry();
registerIstanbulOpcodes(registry, gasCalculator, Account.DEFAULT_VERSION, chainId);
return new EVM(registry, gasCalculator);
}
static EVM london(final GasCalculator gasCalculator, final BigInteger chainId) {
final OperationRegistry registry = new OperationRegistry();
registerLondonOpcodes(registry, gasCalculator, Account.DEFAULT_VERSION, chainId);
return new EVM(registry, gasCalculator);
}
private static void registerFrontierOpcodes(
final OperationRegistry registry,
final GasCalculator gasCalculator,
final int accountVersion) {
registry.put(new AddOperation(gasCalculator), accountVersion);
registry.put(new AddOperation(gasCalculator), accountVersion);
registry.put(new MulOperation(gasCalculator), accountVersion);
registry.put(new SubOperation(gasCalculator), accountVersion);
registry.put(new DivOperation(gasCalculator), accountVersion);
registry.put(new SDivOperation(gasCalculator), accountVersion);
registry.put(new ModOperation(gasCalculator), accountVersion);
registry.put(new SModOperation(gasCalculator), accountVersion);
registry.put(new ExpOperation(gasCalculator), accountVersion);
registry.put(new AddModOperation(gasCalculator), accountVersion);
registry.put(new MulModOperation(gasCalculator), accountVersion);
registry.put(new SignExtendOperation(gasCalculator), accountVersion);
registry.put(new LtOperation(gasCalculator), accountVersion);
registry.put(new GtOperation(gasCalculator), accountVersion);
registry.put(new SLtOperation(gasCalculator), accountVersion);
registry.put(new SGtOperation(gasCalculator), accountVersion);
registry.put(new EqOperation(gasCalculator), accountVersion);
registry.put(new IsZeroOperation(gasCalculator), accountVersion);
registry.put(new AndOperation(gasCalculator), accountVersion);
registry.put(new OrOperation(gasCalculator), accountVersion);
registry.put(new XorOperation(gasCalculator), accountVersion);
registry.put(new NotOperation(gasCalculator), accountVersion);
registry.put(new ByteOperation(gasCalculator), accountVersion);
registry.put(new Sha3Operation(gasCalculator), accountVersion);
registry.put(new AddressOperation(gasCalculator), accountVersion);
registry.put(new BalanceOperation(gasCalculator), accountVersion);
registry.put(new OriginOperation(gasCalculator), accountVersion);
registry.put(new CallerOperation(gasCalculator), accountVersion);
registry.put(new CallValueOperation(gasCalculator), accountVersion);
registry.put(new CallDataLoadOperation(gasCalculator), accountVersion);
registry.put(new CallDataSizeOperation(gasCalculator), accountVersion);
registry.put(new CallDataCopyOperation(gasCalculator), accountVersion);
registry.put(new CodeSizeOperation(gasCalculator), accountVersion);
registry.put(new CodeCopyOperation(gasCalculator), accountVersion);
registry.put(new GasPriceOperation(gasCalculator), accountVersion);
registry.put(new ExtCodeCopyOperation(gasCalculator), accountVersion);
registry.put(new ExtCodeSizeOperation(gasCalculator), accountVersion);
registry.put(new BlockHashOperation(gasCalculator), accountVersion);
registry.put(new CoinbaseOperation(gasCalculator), accountVersion);
registry.put(new TimestampOperation(gasCalculator), accountVersion);
registry.put(new NumberOperation(gasCalculator), accountVersion);
registry.put(new DifficultyOperation(gasCalculator), accountVersion);
registry.put(new GasLimitOperation(gasCalculator), accountVersion);
registry.put(new PopOperation(gasCalculator), accountVersion);
registry.put(new MLoadOperation(gasCalculator), accountVersion);
registry.put(new MStoreOperation(gasCalculator), accountVersion);
registry.put(new MStore8Operation(gasCalculator), accountVersion);
registry.put(new SLoadOperation(gasCalculator), accountVersion);
registry.put(
new SStoreOperation(gasCalculator, SStoreOperation.FRONTIER_MINIMUM), accountVersion);
registry.put(new JumpOperation(gasCalculator), accountVersion);
registry.put(new JumpiOperation(gasCalculator), accountVersion);
registry.put(new PCOperation(gasCalculator), accountVersion);
registry.put(new MSizeOperation(gasCalculator), accountVersion);
registry.put(new GasOperation(gasCalculator), accountVersion);
registry.put(new JumpDestOperation(gasCalculator), accountVersion);
registry.put(new ReturnOperation(gasCalculator), accountVersion);
registry.put(new InvalidOperation(gasCalculator), accountVersion);
registry.put(new StopOperation(gasCalculator), accountVersion);
registry.put(new SelfDestructOperation(gasCalculator), accountVersion);
registry.put(new CreateOperation(gasCalculator), accountVersion);
registry.put(new CallOperation(gasCalculator), accountVersion);
registry.put(new CallCodeOperation(gasCalculator), accountVersion);
// Register the PUSH1, PUSH2, ..., PUSH32 operations.
for (int i = 1; i <= 32; ++i) {
registry.put(new PushOperation(i, gasCalculator), accountVersion);
}
// Register the DUP1, DUP2, ..., DUP16 operations.
for (int i = 1; i <= 16; ++i) {
registry.put(new DupOperation(i, gasCalculator), accountVersion);
}
// Register the SWAP1, SWAP2, ..., SWAP16 operations.
for (int i = 1; i <= 16; ++i) {
registry.put(new SwapOperation(i, gasCalculator), accountVersion);
}
// Register the LOG0, LOG1, ..., LOG4 operations.
for (int i = 0; i < 5; ++i) {
registry.put(new LogOperation(i, gasCalculator), accountVersion);
}
}
private static void registerHomesteadOpcodes(
final OperationRegistry registry,
final GasCalculator gasCalculator,
final int accountVersion) {
registerFrontierOpcodes(registry, gasCalculator, accountVersion);
registry.put(new DelegateCallOperation(gasCalculator), accountVersion);
}
private static void registerByzantiumOpcodes(
final OperationRegistry registry,
final GasCalculator gasCalculator,
final int accountVersion) {
registerHomesteadOpcodes(registry, gasCalculator, accountVersion);
registry.put(new ReturnDataCopyOperation(gasCalculator), accountVersion);
registry.put(new ReturnDataSizeOperation(gasCalculator), accountVersion);
registry.put(new RevertOperation(gasCalculator), accountVersion);
registry.put(new StaticCallOperation(gasCalculator), accountVersion);
}
private static void registerConstantinopleOpcodes(
final OperationRegistry registry,
final GasCalculator gasCalculator,
final int accountVersion) {
registerByzantiumOpcodes(registry, gasCalculator, accountVersion);
registry.put(new Create2Operation(gasCalculator), accountVersion);
registry.put(new SarOperation(gasCalculator), accountVersion);
registry.put(new ShlOperation(gasCalculator), accountVersion);
registry.put(new ShrOperation(gasCalculator), accountVersion);
registry.put(new ExtCodeHashOperation(gasCalculator), accountVersion);
}
private static void registerIstanbulOpcodes(
final OperationRegistry registry,
final GasCalculator gasCalculator,
final int accountVersion,
final BigInteger chainId) {
registerConstantinopleOpcodes(registry, gasCalculator, accountVersion);
registry.put(
new ChainIdOperation(gasCalculator, Bytes32.leftPad(Bytes.of(chainId.toByteArray()))),
Account.DEFAULT_VERSION);
registry.put(new SelfBalanceOperation(gasCalculator), Account.DEFAULT_VERSION);
registry.put(
new SStoreOperation(gasCalculator, SStoreOperation.EIP_1706_MINIMUM),
Account.DEFAULT_VERSION);
}
private static void registerLondonOpcodes(
final OperationRegistry registry,
final GasCalculator gasCalculator,
final int accountVersion,
final BigInteger chainId) {
registerIstanbulOpcodes(registry, gasCalculator, accountVersion, chainId);
registry.put(new BaseFeeOperation(gasCalculator), Account.DEFAULT_VERSION);
}
}
| 1 | 25,703 | This was done to wrap the operation so it could be spied upon during testing. Likely needs to be undone once an alternative test is discovered. | hyperledger-besu | java |
@@ -1062,7 +1062,7 @@ func nextReqFromMsg(msg []byte) (time.Time, int, bool, error) {
if err := json.Unmarshal(msg, &cr); err != nil {
return time.Time{}, -1, false, err
}
- return cr.Expires, cr.Batch, cr.NoWait, nil
+ return time.Now().Add(time.Duration(cr.Expires) * time.Millisecond), cr.Batch, cr.NoWait, nil
}
// Naked batch size here for backward compatibility.
bs := 1 | 1 | // Copyright 2019-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
mrand "math/rand"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/nats-io/nuid"
"golang.org/x/time/rate"
)
type ConsumerInfo struct {
Stream string `json:"stream_name"`
Name string `json:"name"`
Created time.Time `json:"created"`
Config ConsumerConfig `json:"config"`
Delivered SequencePair `json:"delivered"`
AckFloor SequencePair `json:"ack_floor"`
NumPending int `json:"num_pending"`
NumRedelivered int `json:"num_redelivered"`
NumWaiting int `json:"num_waiting"`
}
type ConsumerConfig struct {
Durable string `json:"durable_name,omitempty"`
DeliverSubject string `json:"deliver_subject,omitempty"`
DeliverPolicy DeliverPolicy `json:"deliver_policy"`
OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
OptStartTime *time.Time `json:"opt_start_time,omitempty"`
AckPolicy AckPolicy `json:"ack_policy"`
AckWait time.Duration `json:"ack_wait,omitempty"`
MaxDeliver int `json:"max_deliver,omitempty"`
FilterSubject string `json:"filter_subject,omitempty"`
ReplayPolicy ReplayPolicy `json:"replay_policy"`
RateLimit uint64 `json:"rate_limit_bps,omitempty"` // Bits per sec
SampleFrequency string `json:"sample_freq,omitempty"`
MaxWaiting int `json:"max_waiting,omitempty"`
}
type CreateConsumerRequest struct {
Stream string `json:"stream_name"`
Config ConsumerConfig `json:"config"`
}
// DeliverPolicy determines how the consumer should select the first message to deliver.
type DeliverPolicy int
const (
// DeliverAll will be the default so can be omitted from the request.
DeliverAll DeliverPolicy = iota
// DeliverLast will start the consumer with the last sequence received.
DeliverLast
// DeliverNew will only deliver new messages that are sent after the consumer is created.
DeliverNew
// DeliverByStartSequence will look for a defined starting sequence to start.
DeliverByStartSequence
	// DeliverByStartTime will select the first message with a timestamp >= StartTime
DeliverByStartTime
)
func (dp DeliverPolicy) String() string {
switch dp {
case DeliverAll:
return "all"
case DeliverLast:
return "last"
case DeliverNew:
return "new"
case DeliverByStartSequence:
return "by_start_sequence"
case DeliverByStartTime:
return "by_start_time"
default:
return "undefined"
}
}
// AckPolicy determines how the consumer should acknowledge delivered messages.
type AckPolicy int
const (
// AckNone requires no acks for delivered messages.
AckNone AckPolicy = iota
// AckAll when acking a sequence number, this implicitly acks all sequences below this one as well.
AckAll
// AckExplicit requires ack or nack for all messages.
AckExplicit
)
func (a AckPolicy) String() string {
switch a {
case AckNone:
return "none"
case AckAll:
return "all"
default:
return "explicit"
}
}
// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream.
type ReplayPolicy int
const (
// ReplayInstant will replay messages as fast as possible.
ReplayInstant ReplayPolicy = iota
// ReplayOriginal will maintain the same timing as the messages were received.
ReplayOriginal
)
func (r ReplayPolicy) String() string {
switch r {
case ReplayInstant:
return "instant"
default:
return "original"
}
}
// OK
const OK = "+OK"
// Ack responses. Note that a nil or no payload is the same as AckAck
var (
// Ack
AckAck = []byte("+ACK") // nil or no payload to ack subject also means ACK
AckOK = []byte(OK) // deprecated but +OK meant ack as well.
// Nack
AckNak = []byte("-NAK")
// Progress indicator
AckProgress = []byte("+WPI")
// Ack + Deliver the next message(s).
AckNext = []byte("+NXT")
// Terminate delivery of the message.
AckTerm = []byte("+TERM")
)
// Consumer is a jetstream consumer.
type Consumer struct {
mu sync.Mutex
mset *Stream
acc *Account
name string
stream string
sseq uint64
dseq uint64
adflr uint64
asflr uint64
dsubj string
rlimit *rate.Limiter
reqSub *subscription
ackSub *subscription
ackReplyT string
nextMsgSubj string
pending map[uint64]int64
ptmr *time.Timer
rdq []uint64
rdc map[uint64]uint64
maxdc uint64
waiting *waitQueue
config ConsumerConfig
store ConsumerStore
active bool
replay bool
filterWC bool
dtmr *time.Timer
dthresh time.Duration
fch chan struct{}
qch chan struct{}
inch chan bool
sfreq int32
ackEventT string
deliveryExcEventT string
created time.Time
closed bool
}
const (
// JsAckWaitDefault is the default AckWait, only applicable on explicit ack policy observables.
JsAckWaitDefault = 30 * time.Second
// JsDeleteWaitTimeDefault is the default amount of time we will wait for non-durable
// observables to be in an inactive state before deleting them.
JsDeleteWaitTimeDefault = 5 * time.Second
)
func (mset *Stream) AddConsumer(config *ConsumerConfig) (*Consumer, error) {
if config == nil {
return nil, fmt.Errorf("consumer config required")
}
var err error
	// For now expect a literal subject if it's not empty. Empty means work queue mode (pull mode).
if config.DeliverSubject != _EMPTY_ {
if !subjectIsLiteral(config.DeliverSubject) {
return nil, fmt.Errorf("consumer deliver subject has wildcards")
}
if mset.deliveryFormsCycle(config.DeliverSubject) {
return nil, fmt.Errorf("consumer deliver subject forms a cycle")
}
if config.MaxWaiting != 0 {
return nil, fmt.Errorf("consumer in push mode can not set max waiting")
}
} else {
// Pull mode / work queue mode require explicit ack.
if config.AckPolicy != AckExplicit {
return nil, fmt.Errorf("consumer in pull mode requires explicit ack policy")
}
// They are also required to be durable since otherwise we will not know when to
// clean them up.
if config.Durable == _EMPTY_ {
return nil, fmt.Errorf("consumer in pull mode requires a durable name")
}
if config.RateLimit > 0 {
return nil, fmt.Errorf("consumer in pull mode can not have rate limit set")
}
if config.MaxWaiting < 0 {
return nil, fmt.Errorf("consumer max waiting needs to be positive")
}
// Set to default if not specified.
if config.MaxWaiting == 0 {
config.MaxWaiting = JSWaitQueueDefaultMax
}
}
// Setup proper default for ack wait if we are in explicit ack mode.
if config.AckWait == 0 && (config.AckPolicy == AckExplicit || config.AckPolicy == AckAll) {
config.AckWait = JsAckWaitDefault
}
// Setup default of -1, meaning no limit for MaxDeliver.
if config.MaxDeliver == 0 {
config.MaxDeliver = -1
}
// Make sure any partition subject is also a literal.
if config.FilterSubject != "" {
// Make sure this is a valid partition of the interest subjects.
if !mset.validSubject(config.FilterSubject) {
return nil, fmt.Errorf("consumer filter subject is not a valid subset of the interest subjects")
}
if config.AckPolicy == AckAll {
return nil, fmt.Errorf("consumer with filter subject can not have an ack policy of ack all")
}
}
// Check on start position conflicts.
switch config.DeliverPolicy {
case DeliverAll:
if config.OptStartSeq > 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver all, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver all, but optional start time is also set")
}
case DeliverLast:
if config.OptStartSeq > 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver last, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver last, but optional start time is also set")
}
case DeliverNew:
if config.OptStartSeq > 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver new, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver new, but optional start time is also set")
}
case DeliverByStartSequence:
if config.OptStartSeq == 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver by start sequence, but optional start sequence is not set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver by start sequence, but optional start time is also set")
}
case DeliverByStartTime:
if config.OptStartTime == nil {
return nil, fmt.Errorf("consumer delivery policy is deliver by start time, but optional start time is not set")
}
if config.OptStartSeq != 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver by start time, but optional start sequence is also set")
}
}
sampleFreq := 0
if config.SampleFrequency != "" {
s := strings.TrimSuffix(config.SampleFrequency, "%")
sampleFreq, err = strconv.Atoi(s)
if err != nil {
return nil, fmt.Errorf("failed to parse consumer sampling configuration: %v", err)
}
}
// Hold mset lock here.
mset.mu.Lock()
// If this one is durable and already exists, we let that be ok as long as the configs match.
if isDurableConsumer(config) {
if eo, ok := mset.consumers[config.Durable]; ok {
mset.mu.Unlock()
ocfg := eo.Config()
if reflect.DeepEqual(&ocfg, config) {
return eo, nil
} else {
// If we are a push mode and not active and the only difference
// is deliver subject then update and return.
if configsEqualSansDelivery(ocfg, *config) && eo.hasNoLocalInterest() {
eo.updateDeliverSubject(config.DeliverSubject)
return eo, nil
} else {
return nil, fmt.Errorf("consumer already exists")
}
}
}
}
// Check for any limits, if the config for the consumer sets a limit we check against that
// but if not we use the value from account limits, if account limits is more restrictive
// than stream config we prefer the account limits to handle cases where account limits are
// updated during the lifecycle of the stream
maxc := mset.config.MaxConsumers
if mset.config.MaxConsumers <= 0 || mset.jsa.limits.MaxConsumers < mset.config.MaxConsumers {
maxc = mset.jsa.limits.MaxConsumers
}
if maxc > 0 && len(mset.consumers) >= maxc {
mset.mu.Unlock()
return nil, fmt.Errorf("maximum consumers limit reached")
}
// Check on stream type conflicts.
switch mset.config.Retention {
case WorkQueuePolicy:
// Force explicit acks here.
if config.AckPolicy != AckExplicit {
mset.mu.Unlock()
return nil, fmt.Errorf("workqueue stream requires explicit ack")
}
if len(mset.consumers) > 0 {
if config.FilterSubject == _EMPTY_ {
mset.mu.Unlock()
return nil, fmt.Errorf("multiple non-filtered observables not allowed on workqueue stream")
} else if !mset.partitionUnique(config.FilterSubject) {
// We have a partition but it is not unique amongst the others.
mset.mu.Unlock()
return nil, fmt.Errorf("filtered consumer not unique on workqueue stream")
}
}
if config.DeliverPolicy != DeliverAll {
mset.mu.Unlock()
return nil, fmt.Errorf("consumer must be deliver all on workqueue stream")
}
}
// Set name, which will be durable name if set, otherwise we create one at random.
o := &Consumer{mset: mset,
config: *config,
dsubj: config.DeliverSubject,
active: true,
qch: make(chan struct{}),
fch: make(chan struct{}),
sfreq: int32(sampleFreq),
maxdc: uint64(config.MaxDeliver),
created: time.Now().UTC(),
}
if isDurableConsumer(config) {
if len(config.Durable) > JSMaxNameLen {
mset.mu.Unlock()
return nil, fmt.Errorf("consumer name is too long, maximum allowed is %d", JSMaxNameLen)
}
o.name = config.Durable
if o.isPullMode() {
o.waiting = newWaitQueue(config.MaxWaiting)
}
} else {
for {
o.name = createConsumerName()
if _, ok := mset.consumers[o.name]; !ok {
break
}
}
}
// Check if we have a rate limit set.
if config.RateLimit != 0 {
// TODO(dlc) - Make sane values or error if not sane?
// We are configured in bits per sec so adjust to bytes.
rl := rate.Limit(config.RateLimit / 8)
// Burst should be set to maximum msg size for this account, etc.
var burst int
if mset.config.MaxMsgSize > 0 {
burst = int(mset.config.MaxMsgSize)
} else if mset.jsa.account.limits.mpay > 0 {
burst = int(mset.jsa.account.limits.mpay)
} else {
s := mset.jsa.account.srv
burst = int(s.getOpts().MaxPayload)
}
o.rlimit = rate.NewLimiter(rl, burst)
}
// Check if we have filtered subject that is a wildcard.
if config.FilterSubject != _EMPTY_ && !subjectIsLiteral(config.FilterSubject) {
o.filterWC = true
}
// already under lock, mset.Name() would deadlock
o.stream = mset.config.Name
o.ackEventT = JSMetricConsumerAckPre + "." + o.stream + "." + o.name
o.deliveryExcEventT = JSAdvisoryConsumerMaxDeliveryExceedPre + "." + o.stream + "." + o.name
store, err := mset.store.ConsumerStore(o.name, config)
if err != nil {
mset.mu.Unlock()
return nil, fmt.Errorf("error creating store for observable: %v", err)
}
o.store = store
if !isValidName(o.name) {
mset.mu.Unlock()
return nil, fmt.Errorf("durable name can not contain '.', '*', '>'")
}
// Select starting sequence number
o.selectStartingSeqNo()
// Now register with mset and create the ack subscription.
c := mset.client
if c == nil {
mset.mu.Unlock()
return nil, fmt.Errorf("stream not valid")
}
s, a := c.srv, c.acc
o.acc = a
// Check if we already have this one registered.
if eo, ok := mset.consumers[o.name]; ok {
mset.mu.Unlock()
if !o.isDurable() || !o.isPushMode() {
return nil, fmt.Errorf("consumer already exists")
}
// If we are here we have already registered this durable. If it is still active that is an error.
if eo.Active() {
return nil, fmt.Errorf("consumer already exists and is still active")
}
// Since we are here this means we have a potentially new durable so we should update here.
// Check that configs are the same.
if !configsEqualSansDelivery(o.config, eo.config) {
return nil, fmt.Errorf("consumer replacement durable config not the same")
}
// Once we are here we have a replacement push-based durable.
eo.updateDeliverSubject(o.config.DeliverSubject)
return eo, nil
}
// Set up the ack subscription for this observable. Will use wildcard for all acks.
// We will remember the template to generate replies with sequence numbers and use
// that to scanf them back in.
mn := mset.config.Name
pre := fmt.Sprintf(jsAckT, mn, o.name)
o.ackReplyT = fmt.Sprintf("%s.%%d.%%d.%%d.%%d", pre)
ackSubj := fmt.Sprintf("%s.*.*.*.*", pre)
if sub, err := mset.subscribeInternal(ackSubj, o.processAck); err != nil {
mset.mu.Unlock()
return nil, err
} else {
o.ackSub = sub
}
// Setup the internal sub for next message requests.
if !o.isPushMode() {
o.nextMsgSubj = fmt.Sprintf(JSApiRequestNextT, mn, o.name)
if sub, err := mset.subscribeInternal(o.nextMsgSubj, o.processNextMsgReq); err != nil {
mset.mu.Unlock()
o.deleteWithoutAdvisory()
return nil, err
} else {
o.reqSub = sub
}
}
mset.consumers[o.name] = o
mset.mu.Unlock()
// If push mode, register for notifications on interest.
if o.isPushMode() {
o.dthresh = JsDeleteWaitTimeDefault
o.inch = make(chan bool, 4)
a.sl.RegisterNotification(config.DeliverSubject, o.inch)
o.active = o.hasDeliveryInterest(<-o.inch)
// Check if we are not durable that the delivery subject has interest.
if !o.isDurable() && !o.active {
o.deleteWithoutAdvisory()
return nil, fmt.Errorf("consumer requires interest for delivery subject when ephemeral")
}
}
// If we are not in ReplayInstant mode mark us as in replay state until resolved.
if config.ReplayPolicy != ReplayInstant {
o.replay = true
}
// Now start up Go routine to deliver msgs.
go o.loopAndDeliverMsgs(s, a)
// Startup our state update loop.
go o.updateStateLoop()
o.sendCreateAdvisory()
return o, nil
}
// We need to make sure we protect access to the sendq.
// Do all advisory sends here.
// Lock should be held on entry but will be released.
func (o *Consumer) sendAdvisory(subj string, msg []byte) {
if o.mset != nil && o.mset.sendq != nil {
sendq := o.mset.sendq
o.mu.Unlock()
sendq <- &jsPubMsg{subj, subj, _EMPTY_, nil, msg, nil, 0}
o.mu.Lock()
}
}
func (o *Consumer) sendDeleteAdvisoryLocked() {
e := JSConsumerActionAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
Action: DeleteEvent,
}
j, err := json.MarshalIndent(e, "", " ")
if err != nil {
return
}
subj := JSAdvisoryConsumerDeletedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
func (o *Consumer) sendCreateAdvisory() {
o.mu.Lock()
defer o.mu.Unlock()
e := JSConsumerActionAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
Action: CreateEvent,
}
j, err := json.MarshalIndent(e, "", " ")
if err != nil {
return
}
subj := JSAdvisoryConsumerCreatedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
// Created returns created time.
func (o *Consumer) Created() time.Time {
o.mu.Lock()
created := o.created
o.mu.Unlock()
return created
}
// Internal to allow creation time to be restored.
func (o *Consumer) setCreated(created time.Time) {
o.mu.Lock()
o.created = created
o.mu.Unlock()
}
// This will check for extended interest in a subject. If we have local interest we just return
// that, but in the absence of local interest and presence of gateways or service imports we need
// to check those as well.
func (o *Consumer) hasDeliveryInterest(localInterest bool) bool {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return false
}
acc := o.acc
deliver := o.config.DeliverSubject
o.mu.Unlock()
if localInterest {
return true
}
// If we are here check gateways.
if acc.srv != nil && acc.srv.gateway.enabled {
gw := acc.srv.gateway
gw.RLock()
for _, gwc := range gw.outo {
psi, qr := gwc.gatewayInterest(acc.Name, deliver)
if psi || qr != nil {
gw.RUnlock()
return true
}
}
gw.RUnlock()
}
return false
}
// This processes an update to the local interest for a deliver subject.
func (o *Consumer) updateDeliveryInterest(localInterest bool) {
interest := o.hasDeliveryInterest(localInterest)
o.mu.Lock()
mset := o.mset
if mset == nil || o.isPullMode() {
o.mu.Unlock()
return
}
shouldSignal := interest && !o.active
o.active = interest
// Stop and clear the delete timer always.
stopAndClearTimer(&o.dtmr)
// If we do not have interest anymore and we are not durable start
// a timer to delete us. We wait for a bit in case of server reconnect.
if !o.isDurable() && !interest {
o.dtmr = time.AfterFunc(o.dthresh, func() { o.Delete() })
}
o.mu.Unlock()
if shouldSignal {
mset.signalConsumers()
}
}
// Config returns the consumer's configuration.
func (o *Consumer) Config() ConsumerConfig {
o.mu.Lock()
defer o.mu.Unlock()
return o.config
}
// This is a config change for the delivery subject for a
// push based consumer.
func (o *Consumer) updateDeliverSubject(newDeliver string) {
// Update the config and the dsubj
o.mu.Lock()
defer o.mu.Unlock()
if o.closed || o.isPullMode() {
return
}
o.acc.sl.ClearNotification(o.dsubj, o.inch)
o.dsubj, o.config.DeliverSubject = newDeliver, newDeliver
// When we register new one it will deliver to update state loop.
o.acc.sl.RegisterNotification(newDeliver, o.inch)
}
// Check that configs are equal but allow delivery subjects to be different.
func configsEqualSansDelivery(a, b ConsumerConfig) bool {
// These were copied in so can set Delivery here.
a.DeliverSubject, b.DeliverSubject = _EMPTY_, _EMPTY_
return a == b
}
// Helper to send a reply to an ack.
func (o *Consumer) sendAckReply(subj string) {
o.mu.Lock()
defer o.mu.Unlock()
o.sendAdvisory(subj, nil)
}
// Process a message for the ack reply subject delivered with a message.
func (o *Consumer) processAck(_ *subscription, _ *client, subject, reply string, msg []byte) {
sseq, dseq, dcount, _ := o.ReplyInfo(subject)
var skipAckReply bool
switch {
case len(msg) == 0, bytes.Equal(msg, AckAck), bytes.Equal(msg, AckOK):
o.ackMsg(sseq, dseq, dcount)
case bytes.Equal(msg, AckNext):
o.ackMsg(sseq, dseq, dcount)
o.processNextMsgReq(nil, nil, subject, reply, msg)
skipAckReply = true
case bytes.Equal(msg, AckNak):
o.processNak(sseq, dseq)
case bytes.Equal(msg, AckProgress):
o.progressUpdate(sseq)
case bytes.Equal(msg, AckTerm):
o.processTerm(sseq, dseq, dcount)
}
// Ack the ack if requested.
if len(reply) > 0 && !skipAckReply {
o.sendAckReply(reply)
}
}
// Used to process a working update to delay redelivery.
func (o *Consumer) progressUpdate(seq uint64) {
o.mu.Lock()
if len(o.pending) > 0 {
if _, ok := o.pending[seq]; ok {
o.pending[seq] = time.Now().UnixNano()
}
}
o.mu.Unlock()
}
// Process a NAK.
func (o *Consumer) processNak(sseq, dseq uint64) {
var mset *Stream
o.mu.Lock()
// Check for out of range.
if dseq <= o.adflr || dseq > o.dseq {
o.mu.Unlock()
return
}
// If we are explicit ack make sure this is still on pending list.
if len(o.pending) > 0 {
if _, ok := o.pending[sseq]; !ok {
o.mu.Unlock()
return
}
}
// If already queued up also ignore.
if !o.onRedeliverQueue(sseq) {
o.rdq = append(o.rdq, sseq)
mset = o.mset
}
o.mu.Unlock()
if mset != nil {
mset.signalConsumers()
}
}
// Process a TERM
func (o *Consumer) processTerm(sseq, dseq, dcount uint64) {
// Treat like an ack to suppress redelivery.
o.processAckMsg(sseq, dseq, dcount, false)
o.mu.Lock()
defer o.mu.Unlock()
// Deliver an advisory
e := JSConsumerDeliveryTerminatedAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerDeliveryTerminatedAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
ConsumerSeq: dseq,
StreamSeq: sseq,
Deliveries: dcount,
}
j, err := json.MarshalIndent(e, "", " ")
if err != nil {
return
}
subj := JSAdvisoryConsumerMsgTerminatedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
// Introduce a small delay in when timer fires to check pending.
// Allows bursts to be treated in same time frame.
const ackWaitDelay = time.Millisecond
// ackWait returns how long to wait to fire the pending timer.
func (o *Consumer) ackWait(next time.Duration) time.Duration {
if next > 0 {
return next + ackWaitDelay
}
return o.config.AckWait + ackWaitDelay
}
// This will restore the state from disk.
func (o *Consumer) readStoredState() error {
if o.store == nil {
return nil
}
state, err := o.store.State()
if err == nil && state != nil {
// FIXME(dlc) - re-apply state.
o.dseq = state.Delivered.ConsumerSeq
o.sseq = state.Delivered.StreamSeq
o.adflr = state.AckFloor.ConsumerSeq
o.asflr = state.AckFloor.StreamSeq
o.pending = state.Pending
o.rdc = state.Redelivered
}
// Setup tracking timer if we have restored pending.
if len(o.pending) > 0 && o.ptmr == nil {
o.mu.Lock()
o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)
o.mu.Unlock()
}
return err
}
// Update our state to the store.
func (o *Consumer) writeState() {
o.mu.Lock()
if o.store != nil {
state := &ConsumerState{
Delivered: SequencePair{
ConsumerSeq: o.dseq,
StreamSeq: o.sseq,
},
AckFloor: SequencePair{
ConsumerSeq: o.adflr,
StreamSeq: o.asflr,
},
Pending: o.pending,
Redelivered: o.rdc,
}
// FIXME(dlc) - Hold onto any errors.
o.store.Update(state)
}
o.mu.Unlock()
}
func (o *Consumer) updateStateLoop() {
o.mu.Lock()
fch := o.fch
qch := o.qch
inch := o.inch
o.mu.Unlock()
for {
select {
case <-qch:
return
case interest := <-inch:
// inch can be nil on pull-based, but then this will
// just block and not fire.
o.updateDeliveryInterest(interest)
case <-fch:
// FIXME(dlc) - Check for fast changes at quick intervals.
time.Sleep(25 * time.Millisecond)
o.writeState()
}
}
}
// Info returns our current consumer state.
func (o *Consumer) Info() *ConsumerInfo {
o.mu.Lock()
info := &ConsumerInfo{
Stream: o.stream,
Name: o.name,
Created: o.created,
Config: o.config,
Delivered: SequencePair{
ConsumerSeq: o.dseq - 1,
StreamSeq: o.sseq - 1,
},
AckFloor: SequencePair{
ConsumerSeq: o.adflr,
StreamSeq: o.asflr,
},
NumPending: len(o.pending),
NumRedelivered: len(o.rdc),
}
// If we are a pull mode consumer, report on number of waiting requests.
if o.isPullMode() {
info.NumWaiting = o.waiting.len()
}
o.mu.Unlock()
return info
}
// Will update the underlying store.
// Lock should be held.
func (o *Consumer) updateStore() {
if o.store == nil {
return
}
// Kick our flusher
select {
case o.fch <- struct{}{}:
default:
}
}
// shouldSample lets us know if we are sampling metrics on acks.
func (o *Consumer) shouldSample() bool {
switch {
case o.sfreq <= 0:
return false
case o.sfreq >= 100:
return true
}
// TODO(ripienaar) this is a tad slow so we need to rethink here, however this will only
// hit for those with sampling enabled and its not the default
return mrand.Int31n(100) <= o.sfreq
}
func (o *Consumer) sampleAck(sseq, dseq, dcount uint64) {
if !o.shouldSample() {
return
}
now := time.Now().UTC()
unow := now.UnixNano()
e := JSConsumerAckMetric{
TypedEvent: TypedEvent{
Type: JSConsumerAckMetricType,
ID: nuid.Next(),
Time: now,
},
Stream: o.stream,
Consumer: o.name,
ConsumerSeq: dseq,
StreamSeq: sseq,
Delay: unow - o.pending[sseq],
Deliveries: dcount,
}
j, err := json.MarshalIndent(e, "", " ")
if err != nil {
return
}
o.sendAdvisory(o.ackEventT, j)
}
// Process an ack for a message.
func (o *Consumer) ackMsg(sseq, dseq, dcount uint64) {
o.processAckMsg(sseq, dseq, dcount, true)
}
func (o *Consumer) processAckMsg(sseq, dseq, dcount uint64, doSample bool) {
var sagap uint64
o.mu.Lock()
switch o.config.AckPolicy {
case AckExplicit:
if _, ok := o.pending[sseq]; ok {
if doSample {
o.sampleAck(sseq, dseq, dcount)
}
delete(o.pending, sseq)
			// Consumer sequence numbers can skip during redelivery since
// they always increment. So if we do not have any pending treat
// as all scenario below. Otherwise check that we filled in a gap.
if len(o.pending) == 0 {
o.adflr, o.asflr = o.dseq-1, o.sseq-1
} else if dseq == o.adflr+1 {
o.adflr, o.asflr = dseq, sseq
}
}
// We do these regardless.
delete(o.rdc, sseq)
o.removeFromRedeliverQueue(sseq)
case AckAll:
// no-op
if dseq <= o.adflr || sseq <= o.asflr {
o.mu.Unlock()
return
}
sagap = sseq - o.asflr
o.adflr, o.asflr = dseq, sseq
for seq := sseq; seq > sseq-sagap; seq-- {
delete(o.pending, seq)
delete(o.rdc, seq)
o.removeFromRedeliverQueue(seq)
}
case AckNone:
// FIXME(dlc) - This is error but do we care?
o.mu.Unlock()
return
}
o.updateStore()
mset := o.mset
o.mu.Unlock()
// Let the owning stream know if we are interest or workqueue retention based.
if mset != nil && mset.config.Retention != LimitsPolicy {
if sagap > 1 {
// FIXME(dlc) - This is very inefficient, will need to fix.
for seq := sseq; seq > sseq-sagap; seq-- {
mset.ackMsg(o, seq)
}
} else {
mset.ackMsg(o, sseq)
}
}
}
// Check if we need an ack for this store seq.
// This is called for interest based retention streams to remove messages.
func (o *Consumer) needAck(sseq uint64) bool {
var needAck bool
o.mu.Lock()
switch o.config.AckPolicy {
case AckNone, AckAll:
needAck = sseq > o.asflr
case AckExplicit:
if sseq > o.asflr {
// Generally this means we need an ack, but just double check pending acks.
needAck = true
if len(o.pending) > 0 && sseq < o.sseq {
_, needAck = o.pending[sseq]
}
}
}
o.mu.Unlock()
return needAck
}
// Helper for the next message requests.
func nextReqFromMsg(msg []byte) (time.Time, int, bool, error) {
req := strings.TrimSpace(string(msg))
if len(req) == 0 {
return time.Time{}, 1, false, nil
}
if req[0] == '{' {
var cr JSApiConsumerGetNextRequest
if err := json.Unmarshal(msg, &cr); err != nil {
return time.Time{}, -1, false, err
}
return cr.Expires, cr.Batch, cr.NoWait, nil
}
// Naked batch size here for backward compatibility.
bs := 1
if n, err := strconv.Atoi(req); err == nil {
bs = n
}
return time.Time{}, bs, false, nil
}
// Represents a request that is on the internal waiting queue
type waitingRequest struct {
client *client
reply string
n int // For batching
expires time.Time
noWait bool
}
// waiting queue for requests that are waiting for new messages to arrive.
type waitQueue struct {
rp, wp int
reqs []*waitingRequest
}
// Create a new ring buffer with at most max items.
func newWaitQueue(max int) *waitQueue {
return &waitQueue{rp: -1, reqs: make([]*waitingRequest, max)}
}
var (
errWaitQueueFull = errors.New("wait queue is full")
errWaitQueueNil = errors.New("wait queue is nil")
)
// Adds in a new request.
func (wq *waitQueue) add(req *waitingRequest) error {
if wq == nil {
return errWaitQueueNil
}
if wq.isFull() {
return errWaitQueueFull
}
wq.reqs[wq.wp] = req
// TODO(dlc) - Could make pow2 and get rid of mod.
wq.wp = (wq.wp + 1) % cap(wq.reqs)
// Adjust read pointer if we were empty.
if wq.rp < 0 {
wq.rp = 0
}
return nil
}
func (wq *waitQueue) isFull() bool {
return wq.rp == wq.wp
}
func (wq *waitQueue) len() int {
if wq == nil || wq.rp < 0 {
return 0
}
if wq.rp < wq.wp {
return wq.wp - wq.rp
}
return cap(wq.reqs) - wq.rp + wq.wp
}
// Peek will return the next request waiting or nil if empty.
func (wq *waitQueue) peek() *waitingRequest {
if wq == nil {
return nil
}
var wr *waitingRequest
if wq.rp >= 0 {
wr = wq.reqs[wq.rp]
}
return wr
}
// pop will return the next request and move the read cursor.
func (wq *waitQueue) pop() *waitingRequest {
wr := wq.peek()
if wr != nil {
wr.n--
if wr.n <= 0 {
wq.reqs[wq.rp] = nil
wq.rp = (wq.rp + 1) % cap(wq.reqs)
// Check if we are empty.
if wq.rp == wq.wp {
wq.rp, wq.wp = -1, 0
}
}
}
return wr
}
// processNextMsgReq will process a request for the next message available. A nil message payload means deliver
// a single message. If the payload is a number parseable with Atoi(), then we will send a batch of messages without
// requiring another request to this endpoint, or an ACK.
func (o *Consumer) processNextMsgReq(_ *subscription, c *client, _, reply string, msg []byte) {
o.mu.Lock()
mset := o.mset
if mset == nil || o.isPushMode() {
o.mu.Unlock()
return
}
sendErr := func(status int, description string) {
sendq := mset.sendq
o.mu.Unlock()
hdr := []byte(fmt.Sprintf("NATS/1.0 %d %s\r\n\r\n", status, description))
pmsg := &jsPubMsg{reply, reply, _EMPTY_, hdr, nil, nil, 0}
sendq <- pmsg // Send message.
}
if o.waiting.isFull() {
// Try to expire some of the requests.
if expired := o.expireWaiting(); expired == 0 {
// Force expiration if needed.
o.forceExpireFirstWaiting()
}
}
// Check payload here to see if they sent in batch size or a formal request.
expires, batchSize, noWait, err := nextReqFromMsg(msg)
if err != nil {
sendErr(400, fmt.Sprintf("Bad Request - %v", err))
return
}
// In case we have to queue up this request. This is all on stack pre-allocated.
wr := waitingRequest{client: c, reply: reply, n: batchSize, noWait: noWait, expires: expires}
// If we are in replay mode, defer to processReplay for delivery.
if o.replay {
o.waiting.add(&wr)
o.mu.Unlock()
mset.signalConsumers()
return
}
for i := 0; i < batchSize; i++ {
if subj, hdr, msg, seq, dc, ts, err := o.getNextMsg(); err == nil {
o.deliverMsg(reply, subj, hdr, msg, seq, dc, ts)
// Need to discount this from the total n for the request.
wr.n--
} else {
if wr.noWait {
sendErr(404, "No Messages")
return
}
o.waiting.add(&wr)
break
}
}
o.mu.Unlock()
}
// Increase the delivery count for this message.
// ONLY used on redelivery semantics.
// Lock should be held.
func (o *Consumer) incDeliveryCount(sseq uint64) uint64 {
if o.rdc == nil {
o.rdc = make(map[uint64]uint64)
}
o.rdc[sseq] += 1
return o.rdc[sseq] + 1
}
// send a delivery exceeded advisory.
func (o *Consumer) notifyDeliveryExceeded(sseq, dcount uint64) {
e := JSConsumerDeliveryExceededAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerDeliveryExceededAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
StreamSeq: sseq,
Deliveries: dcount,
}
j, err := json.MarshalIndent(e, "", " ")
if err != nil {
return
}
o.sendAdvisory(o.deliveryExcEventT, j)
}
// Check to see if the candidate subject matches a filter if its present.
func (o *Consumer) isFilteredMatch(subj string) bool {
if !o.filterWC {
return subj == o.config.FilterSubject
}
// If we are here we have a wildcard filter subject.
// TODO(dlc) at speed might be better to just do a sublist with L2 and/or possibly L1.
return subjectIsSubsetMatch(subj, o.config.FilterSubject)
}
// Get next available message from underlying store.
// Is partition aware and redeliver aware.
// Lock should be held.
func (o *Consumer) getNextMsg() (subj string, hdr, msg []byte, seq uint64, dcount uint64, ts int64, err error) {
if o.mset == nil {
return _EMPTY_, nil, nil, 0, 0, 0, fmt.Errorf("consumer not valid")
}
for {
seq, dcount := o.sseq, uint64(1)
if len(o.rdq) > 0 {
seq = o.rdq[0]
o.rdq = append(o.rdq[:0], o.rdq[1:]...)
dcount = o.incDeliveryCount(seq)
if o.maxdc > 0 && dcount > o.maxdc {
// Only send once
if dcount == o.maxdc+1 {
o.notifyDeliveryExceeded(seq, dcount-1)
}
// Make sure to remove from pending.
delete(o.pending, seq)
continue
}
}
subj, hdr, msg, ts, err := o.mset.store.LoadMsg(seq)
if err == nil {
if dcount == 1 { // First delivery.
o.sseq++
if o.config.FilterSubject != _EMPTY_ && !o.isFilteredMatch(subj) {
continue
}
}
// We have the msg here.
return subj, hdr, msg, seq, dcount, ts, nil
}
// We got an error here. If this is an EOF we will return, otherwise
// we can continue looking.
if err == ErrStoreEOF || err == ErrStoreClosed {
return _EMPTY_, nil, nil, 0, 0, 0, err
}
// Skip since its probably deleted or expired.
o.sseq++
}
}
// forceExpireFirstWaiting will force expire the first waiting.
// Lock should be held.
func (o *Consumer) forceExpireFirstWaiting() *waitingRequest {
// FIXME(dlc) - Should we do advisory here as well?
wr := o.waiting.pop()
if wr == nil {
return wr
}
// If we are expiring this and we think there is still interest, alert.
if rr := o.acc.sl.Match(wr.reply); len(rr.psubs)+len(rr.qsubs) > 0 && o.mset != nil {
// We still appear to have interest, so send alert as courtesy.
sendq := o.mset.sendq
o.mu.Unlock()
hdr := []byte("NATS/1.0 408 Request Timeout\r\n\r\n")
pmsg := &jsPubMsg{wr.reply, wr.reply, _EMPTY_, hdr, nil, nil, 0}
sendq <- pmsg // Send message.
o.mu.Lock()
}
return wr
}
// Will check for expiration and lack of interest on waiting requests.
func (o *Consumer) expireWaiting() int {
var expired int
now := time.Now()
for wr := o.waiting.peek(); wr != nil; wr = o.waiting.peek() {
if !wr.expires.IsZero() && now.After(wr.expires) {
o.forceExpireFirstWaiting()
expired++
continue
}
rr := o.acc.sl.Match(wr.reply)
if len(rr.psubs)+len(rr.qsubs) > 0 {
break
}
// No more interest so go ahead and remove this one from our list.
o.forceExpireFirstWaiting()
expired++
}
return expired
}
// Will check to make sure those waiting still have registered interest.
func (o *Consumer) checkWaitingForInterest() bool {
o.expireWaiting()
return o.waiting.len() > 0
}
func (o *Consumer) loopAndDeliverMsgs(s *Server, a *Account) {
	// On startup check to see if we are in a replay situation where replay policy is not instant.
var (
lts int64 // last time stamp seen, used for replay.
lseq uint64
)
o.mu.Lock()
if o.replay {
// consumer is closed when mset is set to nil.
if o.mset == nil {
o.mu.Unlock()
return
}
lseq = o.mset.State().LastSeq
}
o.mu.Unlock()
// Deliver all the msgs we have now, once done or on a condition, we wait for new ones.
for {
var (
mset *Stream
seq, dcnt uint64
subj, dsubj string
hdr []byte
msg []byte
err error
ts int64
delay time.Duration
)
o.mu.Lock()
// consumer is closed when mset is set to nil.
if o.mset == nil {
o.mu.Unlock()
return
}
mset = o.mset
// If we are in push mode and not active let's stop sending.
if o.isPushMode() && !o.active {
goto waitForMsgs
}
// If we are in pull mode and no one is waiting already break and wait.
if o.isPullMode() && !o.checkWaitingForInterest() {
goto waitForMsgs
}
subj, hdr, msg, seq, dcnt, ts, err = o.getNextMsg()
// On error either wait or return.
if err != nil {
if err == ErrStoreMsgNotFound || err == ErrStoreEOF {
goto waitForMsgs
} else {
o.mu.Unlock()
return
}
}
if wr := o.waiting.pop(); wr != nil {
dsubj = wr.reply
} else {
dsubj = o.dsubj
}
// If we are in a replay scenario and have not caught up check if we need to delay here.
if o.replay && lts > 0 {
if delay = time.Duration(ts - lts); delay > time.Millisecond {
qch := o.qch
o.mu.Unlock()
select {
case <-qch:
return
case <-time.After(delay):
}
o.mu.Lock()
}
}
// Track this regardless.
lts = ts
// If we have a rate limit set make sure we check that here.
if o.rlimit != nil {
now := time.Now()
r := o.rlimit.ReserveN(now, len(msg)+len(hdr)+len(subj)+len(dsubj)+len(o.ackReplyT))
delay := r.DelayFrom(now)
if delay > 0 {
qch := o.qch
o.mu.Unlock()
select {
case <-qch:
return
case <-time.After(delay):
}
o.mu.Lock()
}
}
o.deliverMsg(dsubj, subj, hdr, msg, seq, dcnt, ts)
o.mu.Unlock()
continue
waitForMsgs:
// If we were in a replay state check to see if we are caught up. If so clear.
if o.replay && o.sseq > lseq {
o.replay = false
}
// We will wait here for new messages to arrive.
o.mu.Unlock()
mset.waitForMsgs()
}
}
func (o *Consumer) ackReply(sseq, dseq, dcount uint64, ts int64) string {
return fmt.Sprintf(o.ackReplyT, dcount, sseq, dseq, ts)
}
// deliverCurrentMsg is the hot path to deliver a message that was just received.
// Will return if the message was delivered or not.
func (o *Consumer) deliverCurrentMsg(subj string, hdr, msg []byte, seq uint64, ts int64) bool {
o.mu.Lock()
if seq != o.sseq {
o.mu.Unlock()
return false
}
// If we are in push mode and not active let's stop sending.
if o.isPushMode() && !o.active {
o.mu.Unlock()
return false
}
// If we are in pull mode and no one is waiting already break and wait.
if o.isPullMode() && !o.checkWaitingForInterest() {
o.mu.Unlock()
return false
}
// Bump store sequence here.
o.sseq++
// If we are partitioned and we do not match, do not consider this a failure.
// Go ahead and return true.
if o.config.FilterSubject != _EMPTY_ && !o.isFilteredMatch(subj) {
o.mu.Unlock()
return true
}
var dsubj string
if wr := o.waiting.pop(); wr != nil {
dsubj = wr.reply
} else {
dsubj = o.dsubj
}
if len(msg) > 0 {
msg = append(msg[:0:0], msg...)
}
o.deliverMsg(dsubj, subj, hdr, msg, seq, 1, ts)
o.mu.Unlock()
return true
}
// Deliver a msg to the observable.
// Lock should be held and o.mset validated to be non-nil.
func (o *Consumer) deliverMsg(dsubj, subj string, hdr, msg []byte, seq, dcount uint64, ts int64) {
if o.mset == nil {
return
}
pmsg := &jsPubMsg{dsubj, subj, o.ackReply(seq, o.dseq, dcount, ts), hdr, msg, o, seq}
mset := o.mset
sendq := o.mset.sendq
ap := o.config.AckPolicy
// This needs to be unlocked since the other side may need this lock on a failed delivery.
o.mu.Unlock()
// Send message.
sendq <- pmsg
// If we are ack none and mset is interest only we should make sure stream removes interest.
if ap == AckNone && mset.config.Retention == InterestPolicy && !mset.checkInterest(seq, o) {
// FIXME(dlc) - we have mset lock here, but should we??
mset.store.RemoveMsg(seq)
}
o.mu.Lock()
if ap == AckExplicit || ap == AckAll {
o.trackPending(seq)
} else if ap == AckNone {
o.adflr = o.dseq
o.asflr = seq
}
o.dseq++
o.updateStore()
}
// Tracks our outstanding pending acks. Only applicable to AckExplicit mode.
// Lock should be held.
func (o *Consumer) trackPending(seq uint64) {
if o.pending == nil {
o.pending = make(map[uint64]int64)
}
if o.ptmr == nil {
o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)
}
o.pending[seq] = time.Now().UnixNano()
}
// didNotDeliver is called when a delivery for a consumer message failed.
// Depending on our state, we will process the failure.
func (o *Consumer) didNotDeliver(seq uint64) {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return
}
shouldSignal := false
if o.isPushMode() {
o.active = false
} else if o.pending != nil {
		// pull mode and we have pending.
if _, ok := o.pending[seq]; ok {
			// We found this message on pending, we need
// to queue it up for immediate redelivery since
// we know it was not delivered.
if !o.onRedeliverQueue(seq) {
o.rdq = append(o.rdq, seq)
shouldSignal = true
}
}
}
o.mu.Unlock()
if shouldSignal {
mset.signalConsumers()
}
}
// This checks if we already have this sequence queued for redelivery.
// FIXME(dlc) - This is O(n) but should be fast with small redeliver size.
// Lock should be held.
func (o *Consumer) onRedeliverQueue(seq uint64) bool {
for _, rseq := range o.rdq {
if rseq == seq {
return true
}
}
return false
}
// Remove a sequence from the redelivery queue.
// Lock should be held.
func (o *Consumer) removeFromRedeliverQueue(seq uint64) bool {
for i, rseq := range o.rdq {
if rseq == seq {
o.rdq = append(o.rdq[:i], o.rdq[i+1:]...)
return true
}
}
return false
}
// Checks the pending messages.
func (o *Consumer) checkPending() {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return
}
ttl := int64(o.config.AckWait)
next := int64(o.ackWait(0))
now := time.Now().UnixNano()
shouldSignal := false
// Since we can update timestamps, we have to review all pending.
// We may want to unlock here or warn if list is big.
// We also need to sort after.
var expired []uint64
for seq, ts := range o.pending {
elapsed := now - ts
if elapsed >= ttl {
if !o.onRedeliverQueue(seq) {
expired = append(expired, seq)
shouldSignal = true
}
} else if ttl-elapsed < next {
// Update when we should fire next.
next = ttl - elapsed
}
}
if len(expired) > 0 {
sort.Slice(expired, func(i, j int) bool { return expired[i] < expired[j] })
o.rdq = append(o.rdq, expired...)
// Now we should update the timestamp here since we are redelivering.
// We will use an incrementing time to preserve order for any other redelivery.
off := now - o.pending[expired[0]]
for _, seq := range expired {
o.pending[seq] += off
}
}
if len(o.pending) > 0 {
o.ptmr.Reset(o.ackWait(time.Duration(next)))
} else {
o.ptmr.Stop()
o.ptmr = nil
}
o.mu.Unlock()
if shouldSignal {
mset.signalConsumers()
}
}
// SeqFromReply will extract a sequence number from a reply subject.
func (o *Consumer) SeqFromReply(reply string) uint64 {
_, seq, _, _ := o.ReplyInfo(reply)
return seq
}
// StreamSeqFromReply will extract the stream sequence from the reply subject.
func (o *Consumer) StreamSeqFromReply(reply string) uint64 {
seq, _, _, _ := o.ReplyInfo(reply)
return seq
}
// Grab encoded information in the reply subject for a delivered message.
func (o *Consumer) ReplyInfo(reply string) (sseq, dseq, dcount uint64, ts int64) {
n, err := fmt.Sscanf(reply, o.ackReplyT, &dcount, &sseq, &dseq, &ts)
if err != nil || n != 4 {
return 0, 0, 0, 0
}
return
}
// NextSeq returns the next delivered sequence number for this observable.
func (o *Consumer) NextSeq() uint64 {
o.mu.Lock()
dseq := o.dseq
o.mu.Unlock()
return dseq
}
// This will select the store seq to start with based on the
// partition subject.
func (o *Consumer) selectSubjectLast() {
stats := o.mset.store.State()
if stats.LastSeq == 0 {
o.sseq = stats.LastSeq
return
}
// FIXME(dlc) - this is linear and can be optimized by store layer.
for seq := stats.LastSeq; seq >= stats.FirstSeq; seq-- {
subj, _, _, _, err := o.mset.store.LoadMsg(seq)
if err == ErrStoreMsgNotFound {
continue
}
if o.isFilteredMatch(subj) {
o.sseq = seq
return
}
}
}
// Will select the starting sequence.
func (o *Consumer) selectStartingSeqNo() {
stats := o.mset.store.State()
if o.config.OptStartSeq == 0 {
if o.config.DeliverPolicy == DeliverAll {
o.sseq = stats.FirstSeq
} else if o.config.DeliverPolicy == DeliverLast {
o.sseq = stats.LastSeq
// If we are partitioned here we may need to walk backwards.
if o.config.FilterSubject != _EMPTY_ {
o.selectSubjectLast()
}
} else if o.config.OptStartTime != nil {
// If we are here we are time based.
// TODO(dlc) - Once clustered can't rely on this.
o.sseq = o.mset.store.GetSeqFromTime(*o.config.OptStartTime)
} else {
// Default is deliver new only.
o.sseq = stats.LastSeq + 1
}
} else {
o.sseq = o.config.OptStartSeq
}
if stats.FirstSeq == 0 {
o.sseq = 1
} else if o.sseq < stats.FirstSeq {
o.sseq = stats.FirstSeq
} else if o.sseq > stats.LastSeq {
o.sseq = stats.LastSeq + 1
}
// Always set delivery sequence to 1.
o.dseq = 1
// Set ack delivery floor to delivery-1
o.adflr = o.dseq - 1
// Set ack store floor to store-1
o.asflr = o.sseq - 1
}
// Test whether a config represents a durable subscriber.
func isDurableConsumer(config *ConsumerConfig) bool {
return config != nil && config.Durable != _EMPTY_
}
func (o *Consumer) isDurable() bool {
return o.config.Durable != _EMPTY_
}
// Are we in push mode, delivery subject, etc.
func (o *Consumer) isPushMode() bool {
return o.config.DeliverSubject != _EMPTY_
}
func (o *Consumer) isPullMode() bool {
return o.config.DeliverSubject == _EMPTY_
}
// Name returns the name of this observable.
func (o *Consumer) Name() string {
o.mu.Lock()
n := o.name
o.mu.Unlock()
return n
}
// For now size of 6 for randomly created names.
const randConsumerNameLen = 6
func createConsumerName() string {
var b [256]byte
rand.Read(b[:])
sha := sha256.New()
sha.Write(b[:])
return fmt.Sprintf("%x", sha.Sum(nil))[:randConsumerNameLen]
}
// DeleteConsumer will delete the consumer from this stream.
func (mset *Stream) DeleteConsumer(o *Consumer) error {
return o.Delete()
}
// Active indicates if this consumer is still active.
func (o *Consumer) Active() bool {
o.mu.Lock()
active := o.active && o.mset != nil
o.mu.Unlock()
return active
}
// hasNoLocalInterest return true if we have no local interest.
func (o *Consumer) hasNoLocalInterest() bool {
o.mu.Lock()
rr := o.acc.sl.Match(o.config.DeliverSubject)
o.mu.Unlock()
return len(rr.psubs)+len(rr.qsubs) == 0
}
// This is when the underlying stream has been purged.
func (o *Consumer) purge(sseq uint64) {
o.mu.Lock()
o.sseq = sseq
o.asflr = sseq - 1
o.adflr = o.dseq - 1
if len(o.pending) > 0 {
o.pending = nil
if o.ptmr != nil {
o.ptmr.Stop()
// Do not nil this out here. This allows checkPending to fire
// and still be ok and not panic.
}
}
// We need to remove all those being queued for redelivery under o.rdq
if len(o.rdq) > 0 {
var newRDQ []uint64
for _, sseq := range o.rdq {
if sseq >= o.sseq {
newRDQ = append(newRDQ, sseq)
}
}
// Replace with new list. Most of the time this will be nil.
o.rdq = newRDQ
}
o.mu.Unlock()
}
func stopAndClearTimer(tp **time.Timer) {
if *tp == nil {
return
}
// Will get drained in normal course, do not try to
// drain here.
(*tp).Stop()
*tp = nil
}
// Stop will shutdown the consumer for the associated stream.
func (o *Consumer) Stop() error {
return o.stop(false, true, false)
}
func (o *Consumer) deleteWithoutAdvisory() error {
return o.stop(true, true, false)
}
// Delete will delete the consumer for the associated stream and send advisories.
func (o *Consumer) Delete() error {
return o.stop(true, true, true)
}
func (o *Consumer) stop(dflag, doSignal, advisory bool) error {
o.mu.Lock()
if o.closed {
o.mu.Unlock()
return nil
}
o.closed = true
if dflag && advisory {
o.sendDeleteAdvisoryLocked()
}
a := o.acc
close(o.qch)
store := o.store
mset := o.mset
o.mset = nil
o.active = false
ackSub := o.ackSub
reqSub := o.reqSub
o.ackSub = nil
o.reqSub = nil
stopAndClearTimer(&o.ptmr)
stopAndClearTimer(&o.dtmr)
delivery := o.config.DeliverSubject
o.waiting = nil
o.mu.Unlock()
if delivery != "" {
a.sl.ClearNotification(delivery, o.inch)
}
mset.mu.Lock()
// Break us out of the readLoop.
// TODO(dlc) - Should not be bad for small amounts of observables, maybe
// even into thousands. Above that should check what this might do
// performance wise.
if doSignal {
mset.sg.Broadcast()
}
mset.unsubscribe(ackSub)
mset.unsubscribe(reqSub)
delete(mset.consumers, o.name)
rp := mset.config.Retention
mset.mu.Unlock()
// We need to optionally remove all messages since we are interest based retention.
if dflag && rp == InterestPolicy {
var seqs []uint64
o.mu.Lock()
for seq := range o.pending {
seqs = append(seqs, seq)
}
o.mu.Unlock()
// Sort just to keep pending sparse array state small.
sort.Slice(seqs, func(i, j int) bool { return seqs[i] < seqs[j] })
for _, seq := range seqs {
mset.mu.Lock()
hasNoInterest := !mset.checkInterest(seq, o)
mset.mu.Unlock()
if hasNoInterest {
mset.store.RemoveMsg(seq)
}
}
}
// Make sure we stamp our update state
if !dflag {
o.writeState()
}
var err error
if store != nil {
if dflag {
err = store.Delete()
} else {
err = store.Stop()
}
}
return err
}
// Check that we do not form a cycle by delivering to a delivery subject
// that is part of the interest group.
func (mset *Stream) deliveryFormsCycle(deliverySubject string) bool {
mset.mu.Lock()
defer mset.mu.Unlock()
for _, subject := range mset.config.Subjects {
if subjectIsSubsetMatch(deliverySubject, subject) {
return true
}
}
return false
}
// This is same as check for delivery cycle.
func (mset *Stream) validSubject(partitionSubject string) bool {
return mset.deliveryFormsCycle(partitionSubject)
}
// SetInActiveDeleteThreshold sets the delete threshold for how long to wait
// before deleting an inactive ephemeral observable.
func (o *Consumer) SetInActiveDeleteThreshold(dthresh time.Duration) error {
o.mu.Lock()
defer o.mu.Unlock()
if o.isPullMode() {
return fmt.Errorf("consumer is not push-based")
}
if o.isDurable() {
return fmt.Errorf("consumer is not durable")
}
deleteWasRunning := o.dtmr != nil
stopAndClearTimer(&o.dtmr)
o.dthresh = dthresh
if deleteWasRunning {
o.dtmr = time.AfterFunc(o.dthresh, func() { o.Delete() })
}
return nil
}
// switchToEphemeral is called on startup when recovering ephemerals.
func (o *Consumer) switchToEphemeral() {
o.mu.Lock()
o.config.Durable = _EMPTY_
store, ok := o.store.(*consumerFileStore)
rr := o.acc.sl.Match(o.config.DeliverSubject)
o.mu.Unlock()
// Update interest
o.updateDeliveryInterest(len(rr.psubs)+len(rr.qsubs) > 0)
// Write out new config
if ok {
store.updateConfig(o.config)
}
}
// RequestNextMsgSubject returns the subject to request the next message when in pull or worker mode.
// Returns empty otherwise.
func (o *Consumer) RequestNextMsgSubject() string {
return o.nextMsgSubj
}
| 1 | 11,726 | I feel we need to impose a lower limit here maybe? What about a max and default for when none is set? | nats-io-nats-server | go |
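A minimal sketch, not from the PR, of what the lower limit, default and maximum suggested in the review comment above could look like when applied to the client-supplied Expires milliseconds before converting them to a deadline; the constant names and values are assumptions for illustration only.
package server // assumed to sit alongside nextReqFromMsg
import "time"
// Hypothetical bounds for the client-supplied expiration, in milliseconds.
// These names and values are illustrative, not taken from the server.
const (
	defaultNextReqExpiresMs = 5000   // assumed default when the request omits Expires
	minNextReqExpiresMs     = 100    // assumed floor so a request cannot expire immediately
	maxNextReqExpiresMs     = 300000 // assumed ceiling so a request cannot linger forever
)
// clampExpires converts the raw Expires value into a deadline, applying the
// default and the lower/upper limits above.
func clampExpires(expiresMs int64) time.Time {
	if expiresMs <= 0 {
		expiresMs = defaultNextReqExpiresMs
	}
	if expiresMs < minNextReqExpiresMs {
		expiresMs = minNextReqExpiresMs
	} else if expiresMs > maxNextReqExpiresMs {
		expiresMs = maxNextReqExpiresMs
	}
	return time.Now().Add(time.Duration(expiresMs) * time.Millisecond)
}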
@@ -245,7 +245,15 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
"UpdateID": cds.updateIDString(),
}, nil
case "BrowseMetadata":
- result, err := xml.Marshal(obj)
+ node, err := cds.vfs.Stat(obj.Path)
+ if err != nil {
+ return nil, err
+ }
+ upnpObject, err := cds.cdsObjectToUpnpavObject(obj, node, host)
+ if err != nil {
+ return nil, err
+ }
+ result, err := xml.Marshal(upnpObject)
if err != nil {
return nil, err
} | 1 | package dlna
import (
"context"
"encoding/xml"
"fmt"
"log"
"mime"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/anacrolix/dms/dlna"
"github.com/anacrolix/dms/upnp"
"github.com/anacrolix/dms/upnpav"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/vfs"
)
// Add a minimal number of mime types to augment go's built in types
// for environments which don't have access to a mime.types file (eg
// Termux on android)
func init() {
for _, t := range []struct {
mimeType string
extensions string
}{
{"audio/flac", ".flac"},
{"audio/mpeg", ".mpga,.mpega,.mp2,.mp3,.m4a"},
{"audio/ogg", ".oga,.ogg,.opus,.spx"},
{"audio/x-wav", ".wav"},
{"image/tiff", ".tiff,.tif"},
{"video/dv", ".dif,.dv"},
{"video/fli", ".fli"},
{"video/mpeg", ".mpeg,.mpg,.mpe"},
{"video/MP2T", ".ts"},
{"video/mp4", ".mp4"},
{"video/quicktime", ".qt,.mov"},
{"video/ogg", ".ogv"},
{"video/webm", ".webm"},
{"video/x-msvideo", ".avi"},
{"video/x-matroska", ".mpv,.mkv"},
} {
for _, ext := range strings.Split(t.extensions, ",") {
err := mime.AddExtensionType(ext, t.mimeType)
if err != nil {
panic(err)
}
}
}
}
type contentDirectoryService struct {
*server
upnp.Eventing
}
func (cds *contentDirectoryService) updateIDString() string {
return fmt.Sprintf("%d", uint32(os.Getpid()))
}
var mediaMimeTypeRegexp = regexp.MustCompile("^(video|audio|image)/")
// Turns the given entry and DMS host into a UPnP object. A nil object is
// returned if the entry is not of interest.
func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo vfs.Node, host string) (ret interface{}, err error) {
obj := upnpav.Object{
ID: cdsObject.ID(),
Restricted: 1,
ParentID: cdsObject.ParentID(),
}
if fileInfo.IsDir() {
obj.Class = "object.container.storageFolder"
obj.Title = fileInfo.Name()
ret = upnpav.Container{Object: obj}
return
}
if !fileInfo.Mode().IsRegular() {
return
}
// Read the mime type from the fs.Object if possible,
// otherwise fall back to working out what it is from the file path.
var mimeType string
if o, ok := fileInfo.DirEntry().(fs.Object); ok {
mimeType = fs.MimeType(context.TODO(), o)
} else {
mimeType = fs.MimeTypeFromName(fileInfo.Name())
}
mediaType := mediaMimeTypeRegexp.FindStringSubmatch(mimeType)
if mediaType == nil {
return
}
obj.Class = "object.item." + mediaType[1] + "Item"
obj.Title = fileInfo.Name()
item := upnpav.Item{
Object: obj,
Res: make([]upnpav.Resource, 0, 1),
}
item.Res = append(item.Res, upnpav.Resource{
URL: (&url.URL{
Scheme: "http",
Host: host,
Path: resPath,
RawQuery: url.Values{
"path": {cdsObject.Path},
}.Encode(),
}).String(),
ProtocolInfo: fmt.Sprintf("http-get:*:%s:%s", mimeType, dlna.ContentFeatures{
SupportRange: true,
}.String()),
Bitrate: 0,
Duration: "",
Size: uint64(fileInfo.Size()),
Resolution: "",
})
ret = item
return
}
// Returns all the upnpav objects in a directory.
func (cds *contentDirectoryService) readContainer(o object, host string) (ret []interface{}, err error) {
node, err := cds.vfs.Stat(o.Path)
if err != nil {
return
}
if !node.IsDir() {
err = errors.New("not a directory")
return
}
dir := node.(*vfs.Dir)
dirEntries, err := dir.ReadDirAll()
if err != nil {
err = errors.New("failed to list directory")
return
}
sort.Sort(dirEntries)
for _, de := range dirEntries {
child := object{
path.Join(o.Path, de.Name()),
}
obj, err := cds.cdsObjectToUpnpavObject(child, de, host)
if err != nil {
fs.Errorf(cds, "error with %s: %s", child.FilePath(), err)
continue
}
if obj == nil {
fs.Debugf(cds, "unrecognized file type: %s", de)
continue
}
ret = append(ret, obj)
}
return
}
type browse struct {
ObjectID string
BrowseFlag string
Filter string
StartingIndex int
RequestedCount int
}
// ContentDirectory object from ObjectID.
func (cds *contentDirectoryService) objectFromID(id string) (o object, err error) {
o.Path, err = url.QueryUnescape(id)
if err != nil {
return
}
if o.Path == "0" {
o.Path = "/"
}
o.Path = path.Clean(o.Path)
if !path.IsAbs(o.Path) {
err = fmt.Errorf("bad ObjectID %v", o.Path)
return
}
return
}
func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *http.Request) (map[string]string, error) {
host := r.Host
switch action {
case "GetSystemUpdateID":
return map[string]string{
"Id": cds.updateIDString(),
}, nil
case "GetSortCapabilities":
return map[string]string{
"SortCaps": "dc:title",
}, nil
case "Browse":
var browse browse
if err := xml.Unmarshal(argsXML, &browse); err != nil {
return nil, err
}
obj, err := cds.objectFromID(browse.ObjectID)
if err != nil {
return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, err.Error())
}
switch browse.BrowseFlag {
case "BrowseDirectChildren":
objs, err := cds.readContainer(obj, host)
if err != nil {
return nil, upnp.Errorf(upnpav.NoSuchObjectErrorCode, err.Error())
}
totalMatches := len(objs)
objs = objs[func() (low int) {
low = browse.StartingIndex
if low > len(objs) {
low = len(objs)
}
return
}():]
if browse.RequestedCount != 0 && browse.RequestedCount < len(objs) {
objs = objs[:browse.RequestedCount]
}
result, err := xml.Marshal(objs)
if err != nil {
return nil, err
}
return map[string]string{
"TotalMatches": fmt.Sprint(totalMatches),
"NumberReturned": fmt.Sprint(len(objs)),
"Result": didlLite(string(result)),
"UpdateID": cds.updateIDString(),
}, nil
case "BrowseMetadata":
result, err := xml.Marshal(obj)
if err != nil {
return nil, err
}
return map[string]string{
"Result": didlLite(string(result)),
}, nil
default:
return nil, upnp.Errorf(upnp.ArgumentValueInvalidErrorCode, "unhandled browse flag: %v", browse.BrowseFlag)
}
case "GetSearchCapabilities":
return map[string]string{
"SearchCaps": "",
}, nil
// Samsung Extensions
case "X_GetFeatureList":
return map[string]string{
"FeatureList": `<Features xmlns="urn:schemas-upnp-org:av:avs" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:schemas-upnp-org:av:avs http://www.upnp.org/schemas/av/avs.xsd">
<Feature name="samsung.com_BASICVIEW" version="1">
<container id="/" type="object.item.imageItem"/>
<container id="/" type="object.item.audioItem"/>
<container id="/" type="object.item.videoItem"/>
</Feature>
</Features>`}, nil
case "X_SetBookmark":
// just ignore
return map[string]string{}, nil
default:
return nil, upnp.InvalidActionError
}
}
// Represents a ContentDirectory object.
type object struct {
Path string // The cleaned, absolute path for the object relative to the server.
}
// Returns the actual local filesystem path for the object.
func (o *object) FilePath() string {
return filepath.FromSlash(o.Path)
}
// Returns the ObjectID for the object. This is used in various ContentDirectory actions.
func (o object) ID() string {
if !path.IsAbs(o.Path) {
log.Panicf("Relative object path: %s", o.Path)
}
if len(o.Path) == 1 {
return "0"
}
return url.QueryEscape(o.Path)
}
func (o *object) IsRoot() bool {
return o.Path == "/"
}
// Returns the object's parent ObjectID. Fortunately it can be deduced from the
// ObjectID (for now).
func (o object) ParentID() string {
if o.IsRoot() {
return "-1"
}
o.Path = path.Dir(o.Path)
return o.ID()
}
| 1 | 9,421 | ineffectual assignment to `err` (from `ineffassign`) | rclone-rclone | go |
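The ineffassign finding above flags an assignment to err whose value can never be read before it is overwritten. A small self-contained Go illustration of the pattern the linter catches and the usual fix — a sketch, not the rclone code itself:

package main

import (
	"fmt"
	"strconv"
)

// parseBoth trips ineffassign: the first assignment to err is overwritten
// by the second Atoi call before it is ever checked.
func parseBoth(a, b string) (int, int, error) {
	x, err := strconv.Atoi(a) // this err is never read
	y, err := strconv.Atoi(b)
	if err != nil {
		return 0, 0, err
	}
	return x, y, nil
}

// parseBothFixed checks each error before it can be overwritten.
func parseBothFixed(a, b string) (int, int, error) {
	x, err := strconv.Atoi(a)
	if err != nil {
		return 0, 0, err
	}
	y, err := strconv.Atoi(b)
	if err != nil {
		return 0, 0, err
	}
	return x, y, nil
}

func main() {
	_, _, err := parseBoth("oops", "2")
	fmt.Println(err) // nil — the failed parse of "oops" was silently lost
	_, _, err = parseBothFixed("oops", "2")
	fmt.Println(err) // the error is reported
}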
@@ -37,8 +37,8 @@ type Replacer interface {
// they will be used to overwrite other replacements
// if there is a name conflict.
type replacer struct {
- replacements map[string]string
- customReplacements map[string]string
+ replacements map[string]func() string
+ customReplacements map[string]func() string
emptyValue string
responseRecorder *ResponseRecorder
} | 1 | package httpserver
import (
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
)
// requestReplacer is a strings.Replacer which is used to
// encode literal \r and \n characters and keep everything
// on one line
var requestReplacer = strings.NewReplacer(
"\r", "\\r",
"\n", "\\n",
)
// Replacer is a type which can replace placeholder
// substrings in a string with actual values from a
// http.Request and ResponseRecorder. Always use
// NewReplacer to get one of these. Any placeholders
// made with Set() should overwrite existing values if
// the key is already used.
type Replacer interface {
Replace(string) string
Set(key, value string)
}
// replacer implements Replacer. customReplacements
// is used to store custom replacements created with
// Set() until the time of replacement, at which point
// they will be used to overwrite other replacements
// if there is a name conflict.
type replacer struct {
replacements map[string]string
customReplacements map[string]string
emptyValue string
responseRecorder *ResponseRecorder
}
// NewReplacer makes a new replacer based on r and rr which
// are used for request and response placeholders, respectively.
// Request placeholders are created immediately, whereas
// response placeholders are not created until Replace()
// is invoked. rr may be nil if it is not available.
// emptyValue should be the string that is used in place
// of empty string (can still be empty string).
func NewReplacer(r *http.Request, rr *ResponseRecorder, emptyValue string) Replacer {
rep := &replacer{
responseRecorder: rr,
customReplacements: make(map[string]string),
replacements: map[string]string{
"{method}": r.Method,
"{scheme}": func() string {
if r.TLS != nil {
return "https"
}
return "http"
}(),
"{hostname}": func() string {
name, err := os.Hostname()
if err != nil {
return ""
}
return name
}(),
"{host}": r.Host,
"{hostonly}": func() string {
host, _, err := net.SplitHostPort(r.Host)
if err != nil {
return r.Host
}
return host
}(),
"{path}": r.URL.Path,
"{path_escaped}": url.QueryEscape(r.URL.Path),
"{query}": r.URL.RawQuery,
"{query_escaped}": url.QueryEscape(r.URL.RawQuery),
"{fragment}": r.URL.Fragment,
"{proto}": r.Proto,
"{remote}": func() string {
if fwdFor := r.Header.Get("X-Forwarded-For"); fwdFor != "" {
return fwdFor
}
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return r.RemoteAddr
}
return host
}(),
"{port}": func() string {
_, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return ""
}
return port
}(),
"{uri}": r.URL.RequestURI(),
"{uri_escaped}": url.QueryEscape(r.URL.RequestURI()),
"{when}": time.Now().Format(timeFormat),
"{file}": func() string {
_, file := path.Split(r.URL.Path)
return file
}(),
"{dir}": func() string {
dir, _ := path.Split(r.URL.Path)
return dir
}(),
"{request}": func() string {
dump, err := httputil.DumpRequest(r, false)
if err != nil {
return ""
}
return requestReplacer.Replace(string(dump))
}(),
},
emptyValue: emptyValue,
}
// Header placeholders (case-insensitive)
for header, values := range r.Header {
rep.replacements[headerReplacer+strings.ToLower(header)+"}"] = strings.Join(values, ",")
}
return rep
}
// Replace performs a replacement of values on s and returns
// the string with the replaced values.
func (r *replacer) Replace(s string) string {
// Do not attempt replacements if no placeholder is found.
if !strings.ContainsAny(s, "{}") {
return s
}
// Make response placeholders now
if r.responseRecorder != nil {
r.replacements["{status}"] = strconv.Itoa(r.responseRecorder.status)
r.replacements["{size}"] = strconv.Itoa(r.responseRecorder.size)
r.replacements["{latency}"] = time.Since(r.responseRecorder.start).String()
}
// Include custom placeholders, overwriting existing ones if necessary
for key, val := range r.customReplacements {
r.replacements[key] = val
}
// Header replacements - these are case-insensitive, so we can't just use strings.Replace()
for strings.Contains(s, headerReplacer) {
idxStart := strings.Index(s, headerReplacer)
endOffset := idxStart + len(headerReplacer)
idxEnd := strings.Index(s[endOffset:], "}")
if idxEnd > -1 {
placeholder := strings.ToLower(s[idxStart : endOffset+idxEnd+1])
replacement := r.replacements[placeholder]
if replacement == "" {
replacement = r.emptyValue
}
s = s[:idxStart] + replacement + s[endOffset+idxEnd+1:]
} else {
break
}
}
// Regular replacements - these are easier because they're case-sensitive
for placeholder, replacement := range r.replacements {
if replacement == "" {
replacement = r.emptyValue
}
s = strings.Replace(s, placeholder, replacement, -1)
}
return s
}
// Set sets key to value in the r.customReplacements map.
func (r *replacer) Set(key, value string) {
r.customReplacements["{"+key+"}"] = value
}
const (
timeFormat = "02/Jan/2006:15:04:05 -0700"
headerReplacer = "{>"
)
| 1 | 8,617 | Ahhh... this makes sense. Does using functions to defer evaluation of these values have anything to do with #916 (where the `Host: {host}` was 'cached' between different hostnames)? If not, could you help me understand what this change does? | caddyserver-caddy | go |
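To the reviewer's question: storing func() string values defers evaluation until the placeholder is actually replaced, instead of snapshotting the value when the map is built. A minimal sketch of the difference (simplified, not Caddy's actual replacer):

package main

import "fmt"

func main() {
	host := "first.example.com"

	// Eager map: the value is computed once, when the map is built.
	eager := map[string]string{
		"{host}": host,
	}
	// Lazy map: the closure is stored and only invoked at replacement time,
	// so it reflects whatever the state is at that moment.
	lazy := map[string]func() string{
		"{host}": func() string { return host },
	}

	host = "second.example.com" // state changes before any replacement happens

	fmt.Println(eager["{host}"])  // first.example.com (stale snapshot)
	fmt.Println(lazy["{host}"]()) // second.example.com (evaluated on use)
}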
@@ -1324,7 +1324,7 @@ TNonblockingIOThread::~TNonblockingIOThread() {
ownEventBase_ = false;
}
- if (listenSocket_ >= 0) {
+ if (listenSocket_ >= 0 && listenSocket_ != THRIFT_INVALID_SOCKET) {
if (0 != ::THRIFT_CLOSESOCKET(listenSocket_)) {
GlobalOutput.perror("TNonblockingIOThread listenSocket_ close(): ", THRIFT_GET_SOCKET_ERROR);
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#define __STDC_FORMAT_MACROS
#include <thrift/thrift-config.h>
#include <thrift/server/TNonblockingServer.h>
#include <thrift/concurrency/Exception.h>
#include <thrift/transport/TSocket.h>
#include <thrift/concurrency/PlatformThreadFactory.h>
#include <thrift/transport/PlatformSocket.h>
#include <iostream>
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#include <netinet/tcp.h>
#endif
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include <assert.h>
#ifdef HAVE_SCHED_H
#include <sched.h>
#endif
#ifndef AF_LOCAL
#define AF_LOCAL AF_UNIX
#endif
#if !defined(PRIu32)
#define PRIu32 "I32u"
#define PRIu64 "I64u"
#endif
#if defined(_WIN32) && (_WIN32_WINNT < 0x0600)
#define AI_ADDRCONFIG 0x0400
#endif
namespace apache {
namespace thrift {
namespace server {
using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace apache::thrift::concurrency;
using namespace std;
using apache::thrift::transport::TSocket;
using apache::thrift::transport::TTransportException;
using boost::shared_ptr;
/// Three states for sockets: recv frame size, recv data, and send mode
enum TSocketState { SOCKET_RECV_FRAMING, SOCKET_RECV, SOCKET_SEND };
/**
* Six states for the nonblocking server:
* 1) initialize
* 2) read 4 byte frame size
* 3) read frame of data
* 4) wait for task completion (when a thread pool is processing the request)
* 5) send back data (if any)
* 6) force immediate connection close
*/
enum TAppState {
APP_INIT,
APP_READ_FRAME_SIZE,
APP_READ_REQUEST,
APP_WAIT_TASK,
APP_SEND_RESULT,
APP_CLOSE_CONNECTION
};
/**
* Represents a connection that is handled via libevent. This connection
* essentially encapsulates a socket that has some associated libevent state.
*/
class TNonblockingServer::TConnection {
private:
/// Server IO Thread handling this connection
TNonblockingIOThread* ioThread_;
/// Server handle
TNonblockingServer* server_;
/// TProcessor
boost::shared_ptr<TProcessor> processor_;
/// Object wrapping network socket
boost::shared_ptr<TSocket> tSocket_;
/// Libevent object
struct event event_;
/// Libevent flags
short eventFlags_;
/// Socket mode
TSocketState socketState_;
/// Application state
TAppState appState_;
/// How much data needed to read
uint32_t readWant_;
/// Where in the read buffer are we
uint32_t readBufferPos_;
/// Read buffer
uint8_t* readBuffer_;
/// Read buffer size
uint32_t readBufferSize_;
/// Write buffer
uint8_t* writeBuffer_;
/// Write buffer size
uint32_t writeBufferSize_;
/// How far through writing are we?
uint32_t writeBufferPos_;
/// Largest size of write buffer seen since buffer was constructed
size_t largestWriteBufferSize_;
/// Count of the number of calls for use with getResizeBufferEveryN().
int32_t callsForResize_;
/// Transport to read from
boost::shared_ptr<TMemoryBuffer> inputTransport_;
/// Transport that processor writes to
boost::shared_ptr<TMemoryBuffer> outputTransport_;
/// extra transport generated by transport factory (e.g. BufferedRouterTransport)
boost::shared_ptr<TTransport> factoryInputTransport_;
boost::shared_ptr<TTransport> factoryOutputTransport_;
/// Protocol decoder
boost::shared_ptr<TProtocol> inputProtocol_;
/// Protocol encoder
boost::shared_ptr<TProtocol> outputProtocol_;
/// Server event handler, if any
boost::shared_ptr<TServerEventHandler> serverEventHandler_;
/// Thrift call context, if any
void* connectionContext_;
/// Go into read mode
void setRead() { setFlags(EV_READ | EV_PERSIST); }
/// Go into write mode
void setWrite() { setFlags(EV_WRITE | EV_PERSIST); }
/// Set socket idle
void setIdle() { setFlags(0); }
/**
* Set event flags for this connection.
*
* @param eventFlags flags we pass to libevent for the connection.
*/
void setFlags(short eventFlags);
/**
* Libevent handler called (via our static wrapper) when the connection
* socket had something happen. Rather than use the flags libevent passed,
* we use the connection state to determine whether we need to read or
* write the socket.
*/
void workSocket();
public:
class Task;
/// Constructor
TConnection(THRIFT_SOCKET socket,
TNonblockingIOThread* ioThread,
const sockaddr* addr,
socklen_t addrLen) {
readBuffer_ = NULL;
readBufferSize_ = 0;
ioThread_ = ioThread;
server_ = ioThread->getServer();
// Allocate input and output transports these only need to be allocated
// once per TConnection (they don't need to be reallocated on init() call)
inputTransport_.reset(new TMemoryBuffer(readBuffer_, readBufferSize_));
outputTransport_.reset(
new TMemoryBuffer(static_cast<uint32_t>(server_->getWriteBufferDefaultSize())));
tSocket_.reset(new TSocket());
init(socket, ioThread, addr, addrLen);
}
~TConnection() { std::free(readBuffer_); }
/// Close this connection and free or reset its resources.
void close();
/**
* Check buffers against any size limits and shrink it if exceeded.
*
* @param readLimit we reduce read buffer size to this (if nonzero).
* @param writeLimit if nonzero and write buffer is larger, replace it.
*/
void checkIdleBufferMemLimit(size_t readLimit, size_t writeLimit);
/// Initialize
void init(THRIFT_SOCKET socket,
TNonblockingIOThread* ioThread,
const sockaddr* addr,
socklen_t addrLen);
/**
* This is called when the application transitions from one state into
* another. This means that it has finished writing the data that it needed
* to, or finished receiving the data that it needed to.
*/
void transition();
/**
* C-callable event handler for connection events. Provides a callback
* that libevent can understand which invokes connection_->workSocket().
*
* @param fd the descriptor the event occurred on.
* @param which the flags associated with the event.
* @param v void* callback arg where we placed TConnection's "this".
*/
static void eventHandler(evutil_socket_t fd, short /* which */, void* v) {
assert(fd == static_cast<evutil_socket_t>(((TConnection*)v)->getTSocket()->getSocketFD()));
((TConnection*)v)->workSocket();
}
/**
* Notification to server that processing has ended on this request.
* Can be called either when processing is completed or when a waiting
* task has been preemptively terminated (on overload).
*
* Don't call this from the IO thread itself.
*
* @return true if successful, false if unable to notify (check THRIFT_GET_SOCKET_ERROR).
*/
bool notifyIOThread() { return ioThread_->notify(this); }
/*
* Returns the number of this connection's currently assigned IO
* thread.
*/
int getIOThreadNumber() const { return ioThread_->getThreadNumber(); }
/// Force connection shutdown for this connection.
void forceClose() {
appState_ = APP_CLOSE_CONNECTION;
if (!notifyIOThread()) {
close();
throw TException("TConnection::forceClose: failed write on notify pipe");
}
}
/// return the server this connection was initialized for.
TNonblockingServer* getServer() const { return server_; }
/// get state of connection.
TAppState getState() const { return appState_; }
/// return the TSocket transport wrapping this network connection
boost::shared_ptr<TSocket> getTSocket() const { return tSocket_; }
/// return the server event handler if any
boost::shared_ptr<TServerEventHandler> getServerEventHandler() { return serverEventHandler_; }
/// return the Thrift connection context if any
void* getConnectionContext() { return connectionContext_; }
};
class TNonblockingServer::TConnection::Task : public Runnable {
public:
Task(boost::shared_ptr<TProcessor> processor,
boost::shared_ptr<TProtocol> input,
boost::shared_ptr<TProtocol> output,
TConnection* connection)
: processor_(processor),
input_(input),
output_(output),
connection_(connection),
serverEventHandler_(connection_->getServerEventHandler()),
connectionContext_(connection_->getConnectionContext()) {}
void run() {
try {
for (;;) {
if (serverEventHandler_) {
serverEventHandler_->processContext(connectionContext_, connection_->getTSocket());
}
if (!processor_->process(input_, output_, connectionContext_)
|| !input_->getTransport()->peek()) {
break;
}
}
} catch (const TTransportException& ttx) {
GlobalOutput.printf("TNonblockingServer: client died: %s", ttx.what());
} catch (const bad_alloc&) {
GlobalOutput("TNonblockingServer: caught bad_alloc exception.");
exit(1);
} catch (const std::exception& x) {
GlobalOutput.printf("TNonblockingServer: process() exception: %s: %s",
typeid(x).name(),
x.what());
} catch (...) {
GlobalOutput.printf("TNonblockingServer: unknown exception while processing.");
}
// Signal completion back to the libevent thread via a pipe
if (!connection_->notifyIOThread()) {
GlobalOutput.printf("TNonblockingServer: failed to notifyIOThread, closing.");
connection_->close();
throw TException("TNonblockingServer::Task::run: failed write on notify pipe");
}
}
TConnection* getTConnection() { return connection_; }
private:
boost::shared_ptr<TProcessor> processor_;
boost::shared_ptr<TProtocol> input_;
boost::shared_ptr<TProtocol> output_;
TConnection* connection_;
boost::shared_ptr<TServerEventHandler> serverEventHandler_;
void* connectionContext_;
};
void TNonblockingServer::TConnection::init(THRIFT_SOCKET socket,
TNonblockingIOThread* ioThread,
const sockaddr* addr,
socklen_t addrLen) {
tSocket_->setSocketFD(socket);
tSocket_->setCachedAddress(addr, addrLen);
ioThread_ = ioThread;
server_ = ioThread->getServer();
appState_ = APP_INIT;
eventFlags_ = 0;
readBufferPos_ = 0;
readWant_ = 0;
writeBuffer_ = NULL;
writeBufferSize_ = 0;
writeBufferPos_ = 0;
largestWriteBufferSize_ = 0;
socketState_ = SOCKET_RECV_FRAMING;
callsForResize_ = 0;
// get input/transports
factoryInputTransport_ = server_->getInputTransportFactory()->getTransport(inputTransport_);
factoryOutputTransport_ = server_->getOutputTransportFactory()->getTransport(outputTransport_);
// Create protocol
if (server_->getHeaderTransport()) {
inputProtocol_ = server_->getInputProtocolFactory()->getProtocol(factoryInputTransport_,
factoryOutputTransport_);
outputProtocol_ = inputProtocol_;
} else {
inputProtocol_ = server_->getInputProtocolFactory()->getProtocol(factoryInputTransport_);
outputProtocol_ = server_->getOutputProtocolFactory()->getProtocol(factoryOutputTransport_);
}
// Set up for any server event handler
serverEventHandler_ = server_->getEventHandler();
if (serverEventHandler_) {
connectionContext_ = serverEventHandler_->createContext(inputProtocol_, outputProtocol_);
} else {
connectionContext_ = NULL;
}
// Get the processor
processor_ = server_->getProcessor(inputProtocol_, outputProtocol_, tSocket_);
}
void TNonblockingServer::TConnection::workSocket() {
int got = 0, left = 0, sent = 0;
uint32_t fetch = 0;
switch (socketState_) {
case SOCKET_RECV_FRAMING:
union {
uint8_t buf[sizeof(uint32_t)];
uint32_t size;
} framing;
// if we've already received some bytes we kept them here
framing.size = readWant_;
// determine size of this frame
try {
// Read from the socket
fetch = tSocket_->read(&framing.buf[readBufferPos_],
uint32_t(sizeof(framing.size) - readBufferPos_));
if (fetch == 0) {
// Whenever we get here it means a remote disconnect
close();
return;
}
readBufferPos_ += fetch;
} catch (TTransportException& te) {
GlobalOutput.printf("TConnection::workSocket(): %s", te.what());
close();
return;
}
if (readBufferPos_ < sizeof(framing.size)) {
// more needed before frame size is known -- save what we have so far
readWant_ = framing.size;
return;
}
readWant_ = ntohl(framing.size);
if (readWant_ > server_->getMaxFrameSize()) {
// Don't allow giant frame sizes. This prevents bad clients from
// causing us to try and allocate a giant buffer.
GlobalOutput.printf(
"TNonblockingServer: frame size too large "
"(%" PRIu32 " > %" PRIu64
") from client %s. "
"Remote side not using TFramedTransport?",
readWant_,
(uint64_t)server_->getMaxFrameSize(),
tSocket_->getSocketInfo().c_str());
close();
return;
}
// size known; now get the rest of the frame
transition();
return;
case SOCKET_RECV:
// It is an error to be in this state if we already have all the data
assert(readBufferPos_ < readWant_);
try {
// Read from the socket
fetch = readWant_ - readBufferPos_;
got = tSocket_->read(readBuffer_ + readBufferPos_, fetch);
} catch (TTransportException& te) {
GlobalOutput.printf("TConnection::workSocket(): %s", te.what());
close();
return;
}
if (got > 0) {
// Move along in the buffer
readBufferPos_ += got;
// Check that we did not overdo it
assert(readBufferPos_ <= readWant_);
// We are done reading, move onto the next state
if (readBufferPos_ == readWant_) {
transition();
}
return;
}
// Whenever we get down here it means a remote disconnect
close();
return;
case SOCKET_SEND:
// Should never have position past size
assert(writeBufferPos_ <= writeBufferSize_);
// If there is no data to send, then let us move on
if (writeBufferPos_ == writeBufferSize_) {
GlobalOutput("WARNING: Send state with no data to send\n");
transition();
return;
}
try {
left = writeBufferSize_ - writeBufferPos_;
sent = tSocket_->write_partial(writeBuffer_ + writeBufferPos_, left);
} catch (TTransportException& te) {
GlobalOutput.printf("TConnection::workSocket(): %s ", te.what());
close();
return;
}
writeBufferPos_ += sent;
// Did we overdo it?
assert(writeBufferPos_ <= writeBufferSize_);
// We are done!
if (writeBufferPos_ == writeBufferSize_) {
transition();
}
return;
default:
GlobalOutput.printf("Unexpected Socket State %d", socketState_);
assert(0);
}
}
bool TNonblockingServer::getHeaderTransport() {
// Currently if there is no output protocol factory,
// we assume header transport (without having to create
// a new transport and check)
return getOutputProtocolFactory() == NULL;
}
/**
* This is called when the application transitions from one state into
* another. This means that it has finished writing the data that it needed
* to, or finished receiving the data that it needed to.
*/
void TNonblockingServer::TConnection::transition() {
// ensure this connection is active right now
assert(ioThread_);
assert(server_);
// Switch upon the state that we are currently in and move to a new state
switch (appState_) {
case APP_READ_REQUEST:
// We are done reading the request, package the read buffer into transport
// and get back some data from the dispatch function
if (server_->getHeaderTransport()) {
inputTransport_->resetBuffer(readBuffer_, readBufferPos_);
outputTransport_->resetBuffer();
} else {
// We saved room for the framing size in case header transport needed it,
// but just skip it for the non-header case
inputTransport_->resetBuffer(readBuffer_ + 4, readBufferPos_ - 4);
outputTransport_->resetBuffer();
// Prepend four bytes of blank space to the buffer so we can
// write the frame size there later.
outputTransport_->getWritePtr(4);
outputTransport_->wroteBytes(4);
}
server_->incrementActiveProcessors();
if (server_->isThreadPoolProcessing()) {
// We are setting up a Task to do this work and we will wait on it
// Create task and dispatch to the thread manager
boost::shared_ptr<Runnable> task = boost::shared_ptr<Runnable>(
new Task(processor_, inputProtocol_, outputProtocol_, this));
// The application is now waiting on the task to finish
appState_ = APP_WAIT_TASK;
try {
server_->addTask(task);
} catch (IllegalStateException& ise) {
// The ThreadManager is not ready to handle any more tasks (it's probably shutting down).
GlobalOutput.printf("IllegalStateException: Server::process() %s", ise.what());
close();
} catch (TimedOutException& to) {
GlobalOutput.printf("[ERROR] TimedOutException: Server::process() %s", to.what());
close();
}
// Set this connection idle so that libevent doesn't process more
// data on it while we're still waiting for the threadmanager to
// finish this task
setIdle();
return;
} else {
try {
if (serverEventHandler_) {
serverEventHandler_->processContext(connectionContext_, getTSocket());
}
// Invoke the processor
processor_->process(inputProtocol_, outputProtocol_, connectionContext_);
} catch (const TTransportException& ttx) {
GlobalOutput.printf(
"TNonblockingServer transport error in "
"process(): %s",
ttx.what());
server_->decrementActiveProcessors();
close();
return;
} catch (const std::exception& x) {
GlobalOutput.printf("Server::process() uncaught exception: %s: %s",
typeid(x).name(),
x.what());
server_->decrementActiveProcessors();
close();
return;
} catch (...) {
GlobalOutput.printf("Server::process() unknown exception");
server_->decrementActiveProcessors();
close();
return;
}
}
// Intentionally fall through here, the call to process has written into
// the writeBuffer_
case APP_WAIT_TASK:
// We have now finished processing a task and the result has been written
// into the outputTransport_, so we grab its contents and place them into
// the writeBuffer_ for actual writing by the libevent thread
server_->decrementActiveProcessors();
// Get the result of the operation
outputTransport_->getBuffer(&writeBuffer_, &writeBufferSize_);
// If the function call generated return data, then move into the send
// state and get going
// 4 bytes were reserved for frame size
if (writeBufferSize_ > 4) {
// Move into write state
writeBufferPos_ = 0;
socketState_ = SOCKET_SEND;
// Put the frame size into the write buffer
int32_t frameSize = (int32_t)htonl(writeBufferSize_ - 4);
memcpy(writeBuffer_, &frameSize, 4);
// Socket into write mode
appState_ = APP_SEND_RESULT;
setWrite();
// Try to work the socket immediately
// workSocket();
return;
}
// In this case, the request was oneway and we should fall through
// right back into the read frame header state
goto LABEL_APP_INIT;
case APP_SEND_RESULT:
// it's now safe to perform buffer size housekeeping.
if (writeBufferSize_ > largestWriteBufferSize_) {
largestWriteBufferSize_ = writeBufferSize_;
}
if (server_->getResizeBufferEveryN() > 0
&& ++callsForResize_ >= server_->getResizeBufferEveryN()) {
checkIdleBufferMemLimit(server_->getIdleReadBufferLimit(),
server_->getIdleWriteBufferLimit());
callsForResize_ = 0;
}
// N.B.: We also intentionally fall through here into the INIT state!
LABEL_APP_INIT:
case APP_INIT:
// Clear write buffer variables
writeBuffer_ = NULL;
writeBufferPos_ = 0;
writeBufferSize_ = 0;
// Into read4 state we go
socketState_ = SOCKET_RECV_FRAMING;
appState_ = APP_READ_FRAME_SIZE;
readBufferPos_ = 0;
// Register read event
setRead();
// Try to work the socket right away
// workSocket();
return;
case APP_READ_FRAME_SIZE:
readWant_ += 4;
// We just read the request length
// Double the buffer size until it is big enough
if (readWant_ > readBufferSize_) {
if (readBufferSize_ == 0) {
readBufferSize_ = 1;
}
uint32_t newSize = readBufferSize_;
while (readWant_ > newSize) {
newSize *= 2;
}
uint8_t* newBuffer = (uint8_t*)std::realloc(readBuffer_, newSize);
if (newBuffer == NULL) {
// nothing else to be done...
throw std::bad_alloc();
}
readBuffer_ = newBuffer;
readBufferSize_ = newSize;
}
readBufferPos_ = 4;
*((uint32_t*)readBuffer_) = htonl(readWant_ - 4);
// Move into read request state
socketState_ = SOCKET_RECV;
appState_ = APP_READ_REQUEST;
// Work the socket right away
// workSocket();
return;
case APP_CLOSE_CONNECTION:
server_->decrementActiveProcessors();
close();
return;
default:
GlobalOutput.printf("Unexpected Application State %d", appState_);
assert(0);
}
}
void TNonblockingServer::TConnection::setFlags(short eventFlags) {
// Catch the do nothing case
if (eventFlags_ == eventFlags) {
return;
}
// Delete a previously existing event
if (eventFlags_ != 0) {
if (event_del(&event_) == -1) {
GlobalOutput("TConnection::setFlags event_del");
return;
}
}
// Update in memory structure
eventFlags_ = eventFlags;
// Do not call event_set if there are no flags
if (!eventFlags_) {
return;
}
/*
* event_set:
*
* Prepares the event structure &event to be used in future calls to
* event_add() and event_del(). The event will be prepared to call the
* eventHandler using the 'sock' file descriptor to monitor events.
*
* The events can be either EV_READ, EV_WRITE, or both, indicating
* that an application can read or write from the file respectively without
* blocking.
*
* The eventHandler will be called with the file descriptor that triggered
* the event and the type of event which will be one of: EV_TIMEOUT,
* EV_SIGNAL, EV_READ, EV_WRITE.
*
* The additional flag EV_PERSIST makes an event_add() persistent until
* event_del() has been called.
*
* Once initialized, the &event struct can be used repeatedly with
* event_add() and event_del() and does not need to be reinitialized unless
* the eventHandler and/or the argument to it are to be changed. However,
* when an ev structure has been added to libevent using event_add() the
* structure must persist until the event occurs (assuming EV_PERSIST
* is not set) or is removed using event_del(). You may not reuse the same
* ev structure for multiple monitored descriptors; each descriptor needs
* its own ev.
*/
event_set(&event_, tSocket_->getSocketFD(), eventFlags_, TConnection::eventHandler, this);
event_base_set(ioThread_->getEventBase(), &event_);
// Add the event
if (event_add(&event_, 0) == -1) {
GlobalOutput("TConnection::setFlags(): could not event_add");
}
}
/**
* Closes a connection
*/
void TNonblockingServer::TConnection::close() {
// Delete the registered libevent
if (event_del(&event_) == -1) {
GlobalOutput.perror("TConnection::close() event_del", THRIFT_GET_SOCKET_ERROR);
}
if (serverEventHandler_) {
serverEventHandler_->deleteContext(connectionContext_, inputProtocol_, outputProtocol_);
}
ioThread_ = NULL;
// Close the socket
tSocket_->close();
// close any factory produced transports
factoryInputTransport_->close();
factoryOutputTransport_->close();
// release processor and handler
processor_.reset();
// Give this object back to the server that owns it
server_->returnConnection(this);
}
void TNonblockingServer::TConnection::checkIdleBufferMemLimit(size_t readLimit, size_t writeLimit) {
if (readLimit > 0 && readBufferSize_ > readLimit) {
free(readBuffer_);
readBuffer_ = NULL;
readBufferSize_ = 0;
}
if (writeLimit > 0 && largestWriteBufferSize_ > writeLimit) {
// just start over
outputTransport_->resetBuffer(static_cast<uint32_t>(server_->getWriteBufferDefaultSize()));
largestWriteBufferSize_ = 0;
}
}
TNonblockingServer::~TNonblockingServer() {
// Close any active connections (moves them to the idle connection stack)
while (activeConnections_.size()) {
activeConnections_.front()->close();
}
// Clean up unused TConnection objects in connectionStack_
while (!connectionStack_.empty()) {
TConnection* connection = connectionStack_.top();
connectionStack_.pop();
delete connection;
}
// The TNonblockingIOThread objects have shared_ptrs to the Thread
// objects and the Thread objects have shared_ptrs to the TNonblockingIOThread
// objects (as runnable) so these objects will never deallocate without help.
while (!ioThreads_.empty()) {
boost::shared_ptr<TNonblockingIOThread> iot = ioThreads_.back();
ioThreads_.pop_back();
iot->setThread(boost::shared_ptr<Thread>());
}
}
/**
* Creates a new connection either by reusing an object off the stack or
* by allocating a new one entirely
*/
TNonblockingServer::TConnection* TNonblockingServer::createConnection(THRIFT_SOCKET socket,
const sockaddr* addr,
socklen_t addrLen) {
// Check the stack
Guard g(connMutex_);
// pick an IO thread to handle this connection -- currently round robin
assert(nextIOThread_ < ioThreads_.size());
int selectedThreadIdx = nextIOThread_;
nextIOThread_ = static_cast<uint32_t>((nextIOThread_ + 1) % ioThreads_.size());
TNonblockingIOThread* ioThread = ioThreads_[selectedThreadIdx].get();
// Check the connection stack to see if we can re-use
TConnection* result = NULL;
if (connectionStack_.empty()) {
result = new TConnection(socket, ioThread, addr, addrLen);
++numTConnections_;
} else {
result = connectionStack_.top();
connectionStack_.pop();
result->init(socket, ioThread, addr, addrLen);
}
activeConnections_.push_back(result);
return result;
}
/**
* Returns a connection to the stack
*/
void TNonblockingServer::returnConnection(TConnection* connection) {
Guard g(connMutex_);
activeConnections_.erase(std::remove(activeConnections_.begin(),
activeConnections_.end(),
connection),
activeConnections_.end());
if (connectionStackLimit_ && (connectionStack_.size() >= connectionStackLimit_)) {
delete connection;
--numTConnections_;
} else {
connection->checkIdleBufferMemLimit(idleReadBufferLimit_, idleWriteBufferLimit_);
connectionStack_.push(connection);
}
}
/**
* Server socket had something happen. We accept all waiting client
* connections on fd and assign TConnection objects to handle those requests.
*/
void TNonblockingServer::handleEvent(THRIFT_SOCKET fd, short which) {
(void)which;
// Make sure that libevent didn't mess up the socket handles
assert(fd == serverSocket_);
// Server socket accepted a new connection
socklen_t addrLen;
sockaddr_storage addrStorage;
sockaddr* addrp = (sockaddr*)&addrStorage;
addrLen = sizeof(addrStorage);
// Going to accept a new client socket
THRIFT_SOCKET clientSocket;
// Accept as many new clients as possible, even though libevent signaled only
// one, this helps us to avoid having to go back into the libevent engine so
// many times
while ((clientSocket = ::accept(fd, addrp, &addrLen)) != -1) {
// If we're overloaded, take action here
if (overloadAction_ != T_OVERLOAD_NO_ACTION && serverOverloaded()) {
Guard g(connMutex_);
nConnectionsDropped_++;
nTotalConnectionsDropped_++;
if (overloadAction_ == T_OVERLOAD_CLOSE_ON_ACCEPT) {
::THRIFT_CLOSESOCKET(clientSocket);
return;
} else if (overloadAction_ == T_OVERLOAD_DRAIN_TASK_QUEUE) {
if (!drainPendingTask()) {
// Nothing left to discard, so we drop connection instead.
::THRIFT_CLOSESOCKET(clientSocket);
return;
}
}
}
// Explicitly set this socket to NONBLOCK mode
int flags;
if ((flags = THRIFT_FCNTL(clientSocket, THRIFT_F_GETFL, 0)) < 0
|| THRIFT_FCNTL(clientSocket, THRIFT_F_SETFL, flags | THRIFT_O_NONBLOCK) < 0) {
GlobalOutput.perror("thriftServerEventHandler: set THRIFT_O_NONBLOCK (THRIFT_FCNTL) ",
THRIFT_GET_SOCKET_ERROR);
::THRIFT_CLOSESOCKET(clientSocket);
return;
}
// Create a new TConnection for this client socket.
TConnection* clientConnection = createConnection(clientSocket, addrp, addrLen);
// Fail fast if we could not create a TConnection object
if (clientConnection == NULL) {
GlobalOutput.printf("thriftServerEventHandler: failed TConnection factory");
::THRIFT_CLOSESOCKET(clientSocket);
return;
}
/*
* Either notify the ioThread that is assigned this connection to
* start processing, or if it is us, we'll just ask this
* connection to do its initial state change here.
*
* (We need to avoid writing to our own notification pipe, to
* avoid possible deadlocks if the pipe is full.)
*
* The IO thread #0 is the only one that handles these listen
* events, so unless the connection has been assigned to thread #0
* we know it's not on our thread.
*/
if (clientConnection->getIOThreadNumber() == 0) {
clientConnection->transition();
} else {
if (!clientConnection->notifyIOThread()) {
GlobalOutput.perror("[ERROR] notifyIOThread failed on fresh connection, closing", errno);
returnConnection(clientConnection);
}
}
// addrLen is written by the accept() call, so needs to be set before the next call.
addrLen = sizeof(addrStorage);
}
// Done looping accept, now we have to make sure the error is due to
// blocking. Any other error is a problem
if (THRIFT_GET_SOCKET_ERROR != THRIFT_EAGAIN && THRIFT_GET_SOCKET_ERROR != THRIFT_EWOULDBLOCK) {
GlobalOutput.perror("thriftServerEventHandler: accept() ", THRIFT_GET_SOCKET_ERROR);
}
}
/**
* Creates a socket to listen on and binds it to the local port.
*/
void TNonblockingServer::createAndListenOnSocket() {
#ifdef _WIN32
TWinsockSingleton::create();
#endif // _WIN32
THRIFT_SOCKET s;
struct addrinfo hints, *res, *res0;
int error;
char port[sizeof("65536") + 1];
memset(&hints, 0, sizeof(hints));
hints.ai_family = PF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG;
sprintf(port, "%d", port_);
// Wildcard address
error = getaddrinfo(NULL, port, &hints, &res0);
if (error) {
throw TException("TNonblockingServer::serve() getaddrinfo "
+ string(THRIFT_GAI_STRERROR(error)));
}
// Pick the ipv6 address first since ipv4 addresses can be mapped
// into ipv6 space.
for (res = res0; res; res = res->ai_next) {
if (res->ai_family == AF_INET6 || res->ai_next == NULL)
break;
}
// Create the server socket
s = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (s == -1) {
freeaddrinfo(res0);
throw TException("TNonblockingServer::serve() socket() -1");
}
#ifdef IPV6_V6ONLY
if (res->ai_family == AF_INET6) {
int zero = 0;
if (-1 == setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, const_cast_sockopt(&zero), sizeof(zero))) {
GlobalOutput("TServerSocket::listen() IPV6_V6ONLY");
}
}
#endif // #ifdef IPV6_V6ONLY
int one = 1;
// Set THRIFT_NO_SOCKET_CACHING to avoid 2MSL delay on server restart
setsockopt(s, SOL_SOCKET, THRIFT_NO_SOCKET_CACHING, const_cast_sockopt(&one), sizeof(one));
if (::bind(s, res->ai_addr, static_cast<int>(res->ai_addrlen)) == -1) {
::THRIFT_CLOSESOCKET(s);
freeaddrinfo(res0);
throw TTransportException(TTransportException::NOT_OPEN,
"TNonblockingServer::serve() bind",
THRIFT_GET_SOCKET_ERROR);
}
// Done with the addr info
freeaddrinfo(res0);
// Set up this file descriptor for listening
listenSocket(s);
}
/**
* Takes a socket created by listenSocket() and sets various options on it
* to prepare for use in the server.
*/
void TNonblockingServer::listenSocket(THRIFT_SOCKET s) {
// Set socket to nonblocking mode
int flags;
if ((flags = THRIFT_FCNTL(s, THRIFT_F_GETFL, 0)) < 0
|| THRIFT_FCNTL(s, THRIFT_F_SETFL, flags | THRIFT_O_NONBLOCK) < 0) {
::THRIFT_CLOSESOCKET(s);
throw TException("TNonblockingServer::serve() THRIFT_O_NONBLOCK");
}
int one = 1;
struct linger ling = {0, 0};
// Keepalive to ensure full result flushing
setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, const_cast_sockopt(&one), sizeof(one));
// Turn linger off to avoid hung sockets
setsockopt(s, SOL_SOCKET, SO_LINGER, const_cast_sockopt(&ling), sizeof(ling));
// Set TCP nodelay if available, MAC OS X Hack
// See http://lists.danga.com/pipermail/memcached/2005-March/001240.html
#ifndef TCP_NOPUSH
setsockopt(s, IPPROTO_TCP, TCP_NODELAY, const_cast_sockopt(&one), sizeof(one));
#endif
#ifdef TCP_LOW_MIN_RTO
if (TSocket::getUseLowMinRto()) {
setsockopt(s, IPPROTO_TCP, TCP_LOW_MIN_RTO, const_cast_sockopt(&one), sizeof(one));
}
#endif
if (listen(s, LISTEN_BACKLOG) == -1) {
::THRIFT_CLOSESOCKET(s);
throw TException("TNonblockingServer::serve() listen");
}
// Cool, this socket is good to go, set it as the serverSocket_
serverSocket_ = s;
if (!port_) {
struct sockaddr_storage addr;
socklen_t size = sizeof(addr);
if (!getsockname(serverSocket_, reinterpret_cast<sockaddr*>(&addr), &size)) {
if (addr.ss_family == AF_INET6) {
const struct sockaddr_in6* sin = reinterpret_cast<const struct sockaddr_in6*>(&addr);
listenPort_ = ntohs(sin->sin6_port);
} else {
const struct sockaddr_in* sin = reinterpret_cast<const struct sockaddr_in*>(&addr);
listenPort_ = ntohs(sin->sin_port);
}
} else {
GlobalOutput.perror("TNonblocking: failed to get listen port: ", THRIFT_GET_SOCKET_ERROR);
}
}
}
void TNonblockingServer::setThreadManager(boost::shared_ptr<ThreadManager> threadManager) {
threadManager_ = threadManager;
if (threadManager) {
threadManager->setExpireCallback(
apache::thrift::stdcxx::bind(&TNonblockingServer::expireClose,
this,
apache::thrift::stdcxx::placeholders::_1));
threadPoolProcessing_ = true;
} else {
threadPoolProcessing_ = false;
}
}
bool TNonblockingServer::serverOverloaded() {
size_t activeConnections = numTConnections_ - connectionStack_.size();
if (numActiveProcessors_ > maxActiveProcessors_ || activeConnections > maxConnections_) {
if (!overloaded_) {
GlobalOutput.printf("TNonblockingServer: overload condition begun.");
overloaded_ = true;
}
} else {
if (overloaded_ && (numActiveProcessors_ <= overloadHysteresis_ * maxActiveProcessors_)
&& (activeConnections <= overloadHysteresis_ * maxConnections_)) {
GlobalOutput.printf(
"TNonblockingServer: overload ended; "
"%u dropped (%llu total)",
nConnectionsDropped_,
nTotalConnectionsDropped_);
nConnectionsDropped_ = 0;
overloaded_ = false;
}
}
return overloaded_;
}
bool TNonblockingServer::drainPendingTask() {
if (threadManager_) {
boost::shared_ptr<Runnable> task = threadManager_->removeNextPending();
if (task) {
TConnection* connection = static_cast<TConnection::Task*>(task.get())->getTConnection();
assert(connection && connection->getServer() && connection->getState() == APP_WAIT_TASK);
connection->forceClose();
return true;
}
}
return false;
}
void TNonblockingServer::expireClose(boost::shared_ptr<Runnable> task) {
TConnection* connection = static_cast<TConnection::Task*>(task.get())->getTConnection();
assert(connection && connection->getServer() && connection->getState() == APP_WAIT_TASK);
connection->forceClose();
}
void TNonblockingServer::stop() {
if (!port_) {
listenPort_ = 0;
}
// Breaks the event loop in all threads so that they end ASAP.
for (uint32_t i = 0; i < ioThreads_.size(); ++i) {
ioThreads_[i]->stop();
}
}
void TNonblockingServer::registerEvents(event_base* user_event_base) {
userEventBase_ = user_event_base;
// init listen socket
if (serverSocket_ == THRIFT_INVALID_SOCKET)
createAndListenOnSocket();
// set up the IO threads
assert(ioThreads_.empty());
if (!numIOThreads_) {
numIOThreads_ = DEFAULT_IO_THREADS;
}
// User-provided event-base doesn't work for multi-threaded servers
assert(numIOThreads_ == 1 || !userEventBase_);
for (uint32_t id = 0; id < numIOThreads_; ++id) {
// the first IO thread also does the listening on server socket
THRIFT_SOCKET listenFd = (id == 0 ? serverSocket_ : THRIFT_INVALID_SOCKET);
shared_ptr<TNonblockingIOThread> thread(
new TNonblockingIOThread(this, id, listenFd, useHighPriorityIOThreads_));
ioThreads_.push_back(thread);
}
// Notify handler of the preServe event
if (eventHandler_) {
eventHandler_->preServe();
}
// Start all of our helper IO threads. Note that the threads run forever,
// only terminating if stop() is called.
assert(ioThreads_.size() == numIOThreads_);
assert(ioThreads_.size() > 0);
GlobalOutput.printf("TNonblockingServer: Serving on port %d, %d io threads.",
listenPort_,
ioThreads_.size());
// Launch all the secondary IO threads in separate threads
if (ioThreads_.size() > 1) {
ioThreadFactory_.reset(new PlatformThreadFactory(
#if !USE_BOOST_THREAD && !USE_STD_THREAD
PlatformThreadFactory::OTHER, // scheduler
PlatformThreadFactory::NORMAL, // priority
1, // stack size (MB)
#endif
false // detached
));
assert(ioThreadFactory_.get());
// intentionally starting at thread 1, not 0
for (uint32_t i = 1; i < ioThreads_.size(); ++i) {
shared_ptr<Thread> thread = ioThreadFactory_->newThread(ioThreads_[i]);
ioThreads_[i]->setThread(thread);
thread->start();
}
}
// Register the events for the primary (listener) IO thread
ioThreads_[0]->registerEvents();
}
/**
* Main workhorse function, starts up the server listening on a port and
* loops over the libevent handler.
*/
void TNonblockingServer::serve() {
if (ioThreads_.empty())
registerEvents(NULL);
// Run the primary (listener) IO thread loop in our main thread; this will
// only return when the server is shutting down.
ioThreads_[0]->run();
// Ensure all threads are finished before exiting serve()
for (uint32_t i = 0; i < ioThreads_.size(); ++i) {
ioThreads_[i]->join();
GlobalOutput.printf("TNonblocking: join done for IO thread #%d", i);
}
}
TNonblockingIOThread::TNonblockingIOThread(TNonblockingServer* server,
int number,
THRIFT_SOCKET listenSocket,
bool useHighPriority)
: server_(server),
number_(number),
listenSocket_(listenSocket),
useHighPriority_(useHighPriority),
eventBase_(NULL),
ownEventBase_(false) {
notificationPipeFDs_[0] = -1;
notificationPipeFDs_[1] = -1;
}
TNonblockingIOThread::~TNonblockingIOThread() {
// make sure our associated thread is fully finished
join();
if (eventBase_ && ownEventBase_) {
event_base_free(eventBase_);
ownEventBase_ = false;
}
if (listenSocket_ >= 0) {
if (0 != ::THRIFT_CLOSESOCKET(listenSocket_)) {
GlobalOutput.perror("TNonblockingIOThread listenSocket_ close(): ", THRIFT_GET_SOCKET_ERROR);
}
listenSocket_ = THRIFT_INVALID_SOCKET;
}
for (int i = 0; i < 2; ++i) {
if (notificationPipeFDs_[i] >= 0) {
if (0 != ::THRIFT_CLOSESOCKET(notificationPipeFDs_[i])) {
GlobalOutput.perror("TNonblockingIOThread notificationPipe close(): ",
THRIFT_GET_SOCKET_ERROR);
}
notificationPipeFDs_[i] = THRIFT_INVALID_SOCKET;
}
}
}
void TNonblockingIOThread::createNotificationPipe() {
if (evutil_socketpair(AF_LOCAL, SOCK_STREAM, 0, notificationPipeFDs_) == -1) {
GlobalOutput.perror("TNonblockingServer::createNotificationPipe ", EVUTIL_SOCKET_ERROR());
throw TException("can't create notification pipe");
}
if (evutil_make_socket_nonblocking(notificationPipeFDs_[0]) < 0
|| evutil_make_socket_nonblocking(notificationPipeFDs_[1]) < 0) {
::THRIFT_CLOSESOCKET(notificationPipeFDs_[0]);
::THRIFT_CLOSESOCKET(notificationPipeFDs_[1]);
throw TException("TNonblockingServer::createNotificationPipe() THRIFT_O_NONBLOCK");
}
for (int i = 0; i < 2; ++i) {
#if LIBEVENT_VERSION_NUMBER < 0x02000000
int flags;
if ((flags = THRIFT_FCNTL(notificationPipeFDs_[i], F_GETFD, 0)) < 0
|| THRIFT_FCNTL(notificationPipeFDs_[i], F_SETFD, flags | FD_CLOEXEC) < 0) {
#else
if (evutil_make_socket_closeonexec(notificationPipeFDs_[i]) < 0) {
#endif
::THRIFT_CLOSESOCKET(notificationPipeFDs_[0]);
::THRIFT_CLOSESOCKET(notificationPipeFDs_[1]);
throw TException(
"TNonblockingServer::createNotificationPipe() "
"FD_CLOEXEC");
}
}
}
/**
* Register the core libevent events onto the proper base.
*/
void TNonblockingIOThread::registerEvents() {
threadId_ = Thread::get_current();
assert(eventBase_ == 0);
eventBase_ = getServer()->getUserEventBase();
if (eventBase_ == NULL) {
eventBase_ = event_base_new();
ownEventBase_ = true;
}
// Print some libevent stats
if (number_ == 0) {
GlobalOutput.printf("TNonblockingServer: using libevent %s method %s",
event_get_version(),
event_base_get_method(eventBase_));
}
if (listenSocket_ >= 0) {
// Register the server event
event_set(&serverEvent_,
listenSocket_,
EV_READ | EV_PERSIST,
TNonblockingIOThread::listenHandler,
server_);
event_base_set(eventBase_, &serverEvent_);
// Add the event and start up the server
if (-1 == event_add(&serverEvent_, 0)) {
throw TException(
"TNonblockingServer::serve(): "
"event_add() failed on server listen event");
}
GlobalOutput.printf("TNonblocking: IO thread #%d registered for listen.", number_);
}
createNotificationPipe();
// Create an event to be notified when a task finishes
event_set(&notificationEvent_,
getNotificationRecvFD(),
EV_READ | EV_PERSIST,
TNonblockingIOThread::notifyHandler,
this);
// Attach to the base
event_base_set(eventBase_, &notificationEvent_);
// Add the event and start up the server
if (-1 == event_add(&notificationEvent_, 0)) {
throw TException(
"TNonblockingServer::serve(): "
"event_add() failed on task-done notification event");
}
GlobalOutput.printf("TNonblocking: IO thread #%d registered for notify.", number_);
}
bool TNonblockingIOThread::notify(TNonblockingServer::TConnection* conn) {
THRIFT_SOCKET fd = getNotificationSendFD();
if (fd < 0) {
return false;
}
fd_set wfds, efds;
long ret = -1;
long kSize = sizeof(conn);
const char* pos = (const char*)const_cast_sockopt(&conn);
while (kSize > 0) {
FD_ZERO(&wfds);
FD_ZERO(&efds);
FD_SET(fd, &wfds);
FD_SET(fd, &efds);
ret = select(static_cast<int>(fd + 1), NULL, &wfds, &efds, NULL);
if (ret < 0) {
return false;
} else if (ret == 0) {
continue;
}
if (FD_ISSET(fd, &efds)) {
::THRIFT_CLOSESOCKET(fd);
return false;
}
if (FD_ISSET(fd, &wfds)) {
ret = send(fd, pos, kSize, 0);
if (ret < 0) {
if (errno == EAGAIN) {
continue;
}
::THRIFT_CLOSESOCKET(fd);
return false;
}
kSize -= ret;
pos += ret;
}
}
return true;
}
/* static */
void TNonblockingIOThread::notifyHandler(evutil_socket_t fd, short which, void* v) {
TNonblockingIOThread* ioThread = (TNonblockingIOThread*)v;
assert(ioThread);
(void)which;
while (true) {
TNonblockingServer::TConnection* connection = 0;
const int kSize = sizeof(connection);
long nBytes = recv(fd, cast_sockopt(&connection), kSize, 0);
if (nBytes == kSize) {
if (connection == NULL) {
// this is the command to stop our thread, exit the handler!
return;
}
connection->transition();
} else if (nBytes > 0) {
// throw away these bytes and hope that next time we get a solid read
GlobalOutput.printf("notifyHandler: Bad read of %d bytes, wanted %d", nBytes, kSize);
ioThread->breakLoop(true);
return;
} else if (nBytes == 0) {
GlobalOutput.printf("notifyHandler: Notify socket closed!");
// exit the loop
break;
} else { // nBytes < 0
if (THRIFT_GET_SOCKET_ERROR != THRIFT_EWOULDBLOCK
&& THRIFT_GET_SOCKET_ERROR != THRIFT_EAGAIN) {
GlobalOutput.perror("TNonblocking: notifyHandler read() failed: ", THRIFT_GET_SOCKET_ERROR);
ioThread->breakLoop(true);
return;
}
// exit the loop
break;
}
}
}
void TNonblockingIOThread::breakLoop(bool error) {
if (error) {
GlobalOutput.printf("TNonblockingServer: IO thread #%d exiting with error.", number_);
// TODO: figure out something better to do here, but for now kill the
// whole process.
GlobalOutput.printf("TNonblockingServer: aborting process.");
::abort();
}
// sets a flag so that the loop exits on the next event
event_base_loopbreak(eventBase_);
// event_base_loopbreak() only causes the loop to exit the next time
// it wakes up. We need to force it to wake up, in case there are
// no real events it needs to process.
//
// If we're running in the same thread, we can't use the notify(0)
// mechanism to stop the thread, but happily if we're running in the
// same thread, this means the thread can't be blocking in the event
// loop either.
if (!Thread::is_current(threadId_)) {
notify(NULL);
}
}
void TNonblockingIOThread::setCurrentThreadHighPriority(bool value) {
#ifdef HAVE_SCHED_H
// Start out with a standard, low-priority setup for the sched params.
struct sched_param sp;
bzero((void*)&sp, sizeof(sp));
int policy = SCHED_OTHER;
// If desired, set up high-priority sched params structure.
if (value) {
// FIFO scheduler, ranked above default SCHED_OTHER queue
policy = SCHED_FIFO;
// The priority only compares us to other SCHED_FIFO threads, so we
// just pick a random priority halfway between min & max.
const int priority = (sched_get_priority_max(policy) + sched_get_priority_min(policy)) / 2;
sp.sched_priority = priority;
}
// Actually set the sched params for the current thread.
if (0 == pthread_setschedparam(pthread_self(), policy, &sp)) {
GlobalOutput.printf("TNonblocking: IO Thread #%d using high-priority scheduler!", number_);
} else {
GlobalOutput.perror("TNonblocking: pthread_setschedparam(): ", THRIFT_GET_SOCKET_ERROR);
}
#else
THRIFT_UNUSED_VARIABLE(value);
#endif
}
void TNonblockingIOThread::run() {
if (eventBase_ == NULL)
registerEvents();
GlobalOutput.printf("TNonblockingServer: IO thread #%d entering loop...", number_);
if (useHighPriority_) {
setCurrentThreadHighPriority(true);
}
// Run libevent engine, never returns, invokes calls to eventHandler
event_base_loop(eventBase_, 0);
if (useHighPriority_) {
setCurrentThreadHighPriority(false);
}
// cleans up our registered events
cleanupEvents();
GlobalOutput.printf("TNonblockingServer: IO thread #%d run() done!", number_);
}
void TNonblockingIOThread::cleanupEvents() {
// stop the listen socket, if any
if (listenSocket_ >= 0) {
if (event_del(&serverEvent_) == -1) {
GlobalOutput.perror("TNonblockingIOThread::stop() event_del: ", THRIFT_GET_SOCKET_ERROR);
}
}
event_del(&notificationEvent_);
}
void TNonblockingIOThread::stop() {
// This should cause the thread to fall out of its event loop ASAP.
breakLoop(false);
}
void TNonblockingIOThread::join() {
// If this was a thread created by a factory (not the thread that called
// serve()), we join() it to make sure we shut down fully.
if (thread_) {
try {
// Note that it is safe to both join() ourselves twice, as well as join
// the current thread as the pthread implementation checks for deadlock.
thread_->join();
} catch (...) {
// swallow everything
}
}
}
}
}
} // apache::thrift::server
| 1 | 12,733 | Perhaps the only test needed is to see if the socket is not an invalid socket? Is the >= 0 test necessary at this point? | apache-thrift | c |
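The reviewer's point is that on platforms where the socket handle type is unsigned (as it is for Windows sockets), listenSocket_ >= 0 is vacuously true, so the only meaningful test is the comparison against the invalid-socket sentinel. A language-neutral Go sketch of the same idea, using made-up names rather than Thrift's actual types:

package main

import "fmt"

// socketHandle stands in for an unsigned OS handle type.
type socketHandle uint64

// invalidSocket mimics an INVALID_SOCKET-style sentinel: all bits set.
const invalidSocket = ^socketHandle(0)

// shouldClose returns true only for handles that are actually open.
// A ">= 0" check would be vacuously true for an unsigned type, so the
// sentinel comparison is the only test that carries information.
func shouldClose(s socketHandle) bool {
	return s != invalidSocket
}

func main() {
	fmt.Println(shouldClose(invalidSocket)) // false
	fmt.Println(shouldClose(3))             // true
}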
@@ -20,7 +20,16 @@
</div>
<% if current_user.subscription && current_user.stripe_customer %>
- <%= render 'credit_card_form' %>
+ <div class='text-box'>
+ <%= semantic_form_for current_user.subscription, url: subscription_path(current_user.subscription) do |form| %>
+ <%= form.inputs "Your Subscription Billing Info", id: 'billing-information' do %>
+ <%= render 'shared/credit_card_form' %>
+ <% end %>
+ <%= form.actions do %>
+ <%= form.action :submit, label: 'Update Your Card' %>
+ <% end %>
+ <% end %>
+ </div>
<% end %>
</div>
| 1 | <%= content_for :subject, 'Account' %>
<div class="text-box-wrapper">
<div class="text-box">
<%= semantic_form_for current_user, url: my_account_path do |form| %>
<%= form.inputs "Your Information [#{link_to('Sign out', sign_out_path, method: :delete)}]" do %>
<%= form.input :first_name, label: 'Your Name', input_html: { placeholder: "First name" } %>
<%= form.input :last_name, label: false, input_html: { placeholder: "Last name" } %>
<%= form.input :email, as: :email %>
<%= form.input :github_username %>
<% if !current_user.external_auth? %>
<%= form.input :password %>
<% end %>
<% end %>
<%= form.actions do %>
<li><%= form.submit 'Update account' %></li>
<% end %>
<% end %>
</div>
<% if current_user.subscription && current_user.stripe_customer %>
<%= render 'credit_card_form' %>
<% end %>
</div>
<aside id="account-sidebar">
<% if current_user.has_purchased? %>
<h3>Your purchases</h3>
<ol class="purchases">
<%= render current_user.paid_purchases %>
</ol>
<p class="chat">Every product includes support for any questions you may have about the topic. Visit our <%= link_to "live chat", CHAT_LINK %>.</p>
<% end %>
<% if current_user.has_active_subscription? %>
<h3>Your Subscription</h3>
<ol class="purchases">
<%= render current_user.subscription %>
</ol>
<% end %>
</aside>
<% content_for :javascript do -%>
<script type="text/javascript" charset="utf-8">
Stripe.setPublishableKey('<%= STRIPE_PUBLIC_KEY %>');
stripeResponseHandler = function(status, response){
if (response.error) {
$('fieldset.actions input').removeAttr('disabled');
$('.subscription-errors').html(response.error.message);
} else {
$form = $('form.subscription');
token = response['id'];
$form.append("<input type='hidden' name='stripe_token' value='" + token + "' />");
$form.get(0).submit();
}
}
$(document).ready(function() {
$('form.subscription').submit(function(event){
$form = $(this);
$form.find('fieldset.actions input').prop('disabled', true);
Stripe.createToken({
number: $('.card-number').val(),
cvc: $('.card-cvc').val(),
exp_month: $('.card-expiry-month').val(),
exp_year: $('.card-expiry-year').val()
}, stripeResponseHandler);
return false;
});
});
</script>
<% end %>
| 1 | 7,332 | Could this line use `url: current_user.subscription`? | thoughtbot-upcase | rb |
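A minimal sketch of the reviewer's question, assuming the app's RESTful subscription routes let `url_for` resolve the record (which the existing `subscription_path(current_user.subscription)` call already implies):

```erb
<%= semantic_form_for current_user.subscription, url: current_user.subscription do |form| %>
  <%# form inputs and actions unchanged %>
<% end %>
```

Passing the record keeps the helper call shorter; dropping the `url:` option entirely and relying on the form builder's default URL inference would be a further step the maintainers may or may not want.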
@@ -477,7 +477,7 @@ class Context
$quoted_remove_var_id = preg_quote($remove_var_id);
foreach ($clause->possibilities as $var_id => $_) {
- if (preg_match('/' . $quoted_remove_var_id . '[\]\[\-]/', $var_id)) {
+ if (preg_match('/' . preg_quote($quoted_remove_var_id, '/') . '[\]\[\-]/', $var_id)) {
break 2;
}
} | 1 | <?php
namespace Psalm;
use PhpParser;
use Psalm\Checker\StatementsChecker;
use Psalm\Storage\FunctionLikeStorage;
use Psalm\Type\Reconciler;
use Psalm\Type\Union;
class Context
{
/**
* @var array<string, Type\Union>
*/
public $vars_in_scope = [];
/**
* @var array<string, bool>
*/
public $vars_possibly_in_scope = [];
/**
* Whether or not we're inside the conditional of an if/where etc.
*
* This changes whether or not the context is cloned
*
* @var bool
*/
public $inside_conditional = false;
/**
* Whether or not we're inside a __construct function
*
* @var bool
*/
public $inside_constructor = false;
/**
* Whether or not we're inside an isset call
*
* Inside isssets Psalm is more lenient about certain things
*
* @var bool
*/
public $inside_isset = false;
/**
* Whether or not we're inside an unset call, where
* we don't care about possibly undefined variables
*
* @var bool
*/
public $inside_unset = false;
/**
* Whether or not we're inside an class_exists call, where
* we don't care about possibly undefined classes
*
* @var bool
*/
public $inside_class_exists = false;
/**
* @var null|CodeLocation
*/
public $include_location = null;
/**
* @var string|null
*/
public $self;
/**
* @var string|null
*/
public $parent;
/**
* @var bool
*/
public $check_classes = true;
/**
* @var bool
*/
public $check_variables = true;
/**
* @var bool
*/
public $check_methods = true;
/**
* @var bool
*/
public $check_consts = true;
/**
* @var bool
*/
public $check_functions = true;
/**
* A list of classes checked with class_exists
*
* @var array<string,bool>
*/
private $phantom_classes = [];
/**
* A list of clauses in Conjunctive Normal Form
*
* @var array<int, Clause>
*/
public $clauses = [];
/**
* Whether or not to do a deep analysis and collect mutations to this context
*
* @var bool
*/
public $collect_mutations = false;
/**
* Whether or not to do a deep analysis and collect initializations from private methods
*
* @var bool
*/
public $collect_initializations = false;
/**
* Stored to prevent re-analysing methods when checking for initialised properties
*
* @var array<string, bool>|null
*/
public $initialized_methods = null;
/**
* @var array<string, Type\Union>
*/
public $constants = [];
/**
* Whether or not to track how many times a variable is used
*
* @var bool
*/
public $collect_references = false;
/**
* Whether or not to track exceptions
*
* @var bool
*/
public $collect_exceptions = false;
/**
* A list of variables that have been referenced
*
* @var array<string, bool>
*/
public $referenced_var_ids = [];
/**
* A list of variables that have never been referenced
*
* @var array<string, array<string, CodeLocation>>
*/
public $unreferenced_vars = [];
/**
* A list of variables that have been passed by reference (where we know their type)
*
* @var array<string, \Psalm\ReferenceConstraint>|null
*/
public $byref_constraints;
/**
* If this context inherits from a context, it is here
*
* @var Context|null
*/
public $parent_context;
/**
* @var array<string, Type\Union>
*/
public $possible_param_types = [];
/**
* A list of vars that have been assigned to
*
* @var array<string, bool>
*/
public $assigned_var_ids = [];
/**
* A list of vars that have been may have been assigned to
*
* @var array<string, bool>
*/
public $possibly_assigned_var_ids = [];
/**
* A list of classes or interfaces that may have been thrown
*
* @var array<string, bool>
*/
public $possibly_thrown_exceptions = [];
/**
* @var bool
*/
public $is_global = false;
/**
* @var array<string, bool>
*/
public $protected_var_ids = [];
/**
* If we've branched from the main scope, a byte offset for where that branch happened
*
* @var int|null
*/
public $branch_point;
/**
* If we're inside case statements we allow continue; statements as an alias of break;
*
* @var bool
*/
public $inside_case = false;
/**
* @var bool
*/
public $inside_loop = false;
/**
* @var Scope\LoopScope|null
*/
public $loop_scope = null;
/**
* @var Scope\SwitchScope|null
*/
public $switch_scope = null;
/**
* @param string|null $self
*/
public function __construct($self = null)
{
$this->self = $self;
}
/**
* @return void
*/
public function __clone()
{
foreach ($this->vars_in_scope as &$type) {
$type = clone $type;
}
foreach ($this->clauses as &$clause) {
$clause = clone $clause;
}
foreach ($this->constants as &$constant) {
$constant = clone $constant;
}
}
/**
* Updates the parent context, looking at the changes within a block and then applying those changes, where
* necessary, to the parent context
*
* @param Context $start_context
* @param Context $end_context
* @param bool $has_leaving_statements whether or not the parent scope is abandoned between
* $start_context and $end_context
* @param array $vars_to_update
* @param array $updated_vars
*
* @return void
*/
public function update(
Context $start_context,
Context $end_context,
$has_leaving_statements,
array $vars_to_update,
array &$updated_vars
) {
foreach ($start_context->vars_in_scope as $var_id => $old_type) {
// this is only true if there was some sort of type negation
if (in_array($var_id, $vars_to_update, true)) {
// if we're leaving, we're effectively deleting the possibility of the if types
$new_type = !$has_leaving_statements && $end_context->hasVariable($var_id)
? $end_context->vars_in_scope[$var_id]
: null;
$existing_type = isset($this->vars_in_scope[$var_id]) ? $this->vars_in_scope[$var_id] : null;
if (!$existing_type) {
if ($new_type) {
$this->vars_in_scope[$var_id] = clone $new_type;
$updated_vars[$var_id] = true;
}
continue;
}
// if the type changed within the block of statements, process the replacement
// also never allow ourselves to remove all types from a union
if ((!$new_type || !$old_type->equals($new_type))
&& ($new_type || count($existing_type->getTypes()) > 1)
) {
$existing_type->substitute($old_type, $new_type);
if ($new_type && $new_type->from_docblock) {
$existing_type->setFromDocblock();
}
$updated_vars[$var_id] = true;
}
}
}
}
/**
* @param array<string, Type\Union> $new_vars_in_scope
* @param bool $include_new_vars
*
* @return array<string,Type\Union>
*/
public function getRedefinedVars(array $new_vars_in_scope, $include_new_vars = false)
{
$redefined_vars = [];
foreach ($this->vars_in_scope as $var_id => $this_type) {
if (!isset($new_vars_in_scope[$var_id])) {
if ($include_new_vars) {
$redefined_vars[$var_id] = $this_type;
}
continue;
}
$new_type = $new_vars_in_scope[$var_id];
if (!$this_type->failed_reconciliation
&& !$this_type->isEmpty()
&& !$new_type->isEmpty()
&& !$this_type->equals($new_type)
) {
$redefined_vars[$var_id] = $this_type;
}
}
return $redefined_vars;
}
/**
* @return void
*/
public function inferType(
PhpParser\Node\Expr $expr,
FunctionLikeStorage $function_storage,
Type\Union $inferred_type
) {
if (!isset($expr->inferredType)) {
return;
}
$expr_type = $expr->inferredType;
if (($expr_type->isMixed() || $expr_type->getId() === $inferred_type->getId())
&& $expr instanceof PhpParser\Node\Expr\Variable
&& is_string($expr->name)
&& !isset($this->assigned_var_ids['$' . $expr->name])
&& array_key_exists($expr->name, $function_storage->param_types)
&& !$function_storage->param_types[$expr->name]
) {
if (isset($this->possible_param_types[$expr->name])) {
$this->possible_param_types[$expr->name] = Type::combineUnionTypes(
$this->possible_param_types[$expr->name],
$inferred_type
);
} else {
$this->possible_param_types[$expr->name] = $inferred_type;
$this->vars_in_scope['$' . $expr->name] = clone $inferred_type;
}
}
}
/**
* @param Context $original_context
* @param Context $new_context
*
* @return array<int, string>
*/
public static function getNewOrUpdatedVarIds(Context $original_context, Context $new_context)
{
$redefined_var_ids = [];
foreach ($new_context->vars_in_scope as $var_id => $context_type) {
if (!isset($original_context->vars_in_scope[$var_id])
|| !$original_context->vars_in_scope[$var_id]->equals($context_type)
) {
$redefined_var_ids[] = $var_id;
}
}
return $redefined_var_ids;
}
/**
* @param string $remove_var_id
*
* @return void
*/
public function remove($remove_var_id)
{
unset(
$this->referenced_var_ids[$remove_var_id],
$this->vars_possibly_in_scope[$remove_var_id]
);
if (isset($this->vars_in_scope[$remove_var_id])) {
$existing_type = $this->vars_in_scope[$remove_var_id];
unset($this->vars_in_scope[$remove_var_id]);
$this->removeDescendents($remove_var_id, $existing_type);
}
}
/**
* @param string[] $changed_var_ids
*
* @return void
*/
public function removeReconciledClauses(array $changed_var_ids)
{
$this->clauses = array_filter(
$this->clauses,
/** @return bool */
function (Clause $c) use ($changed_var_ids) {
return count($c->possibilities) > 1
|| $c->wedge
|| !in_array(array_keys($c->possibilities)[0], $changed_var_ids, true);
}
);
}
/**
* @param string $remove_var_id
* @param Clause[] $clauses
* @param Union|null $new_type
* @param StatementsChecker|null $statements_checker
*
* @return array<int, Clause>
*/
public static function filterClauses(
$remove_var_id,
array $clauses,
Union $new_type = null,
StatementsChecker $statements_checker = null
) {
$new_type_string = $new_type ? $new_type->getId() : '';
$clauses_to_keep = [];
foreach ($clauses as $clause) {
\Psalm\Type\Algebra::calculateNegation($clause);
$quoted_remove_var_id = preg_quote($remove_var_id);
foreach ($clause->possibilities as $var_id => $_) {
if (preg_match('/' . $quoted_remove_var_id . '[\]\[\-]/', $var_id)) {
break 2;
}
}
if (!isset($clause->possibilities[$remove_var_id]) ||
$clause->possibilities[$remove_var_id] === [$new_type_string]
) {
$clauses_to_keep[] = $clause;
} elseif ($statements_checker &&
$new_type &&
!$new_type->isMixed()
) {
$type_changed = false;
// if the clause contains any possibilities that would be altered
// by the new type
foreach ($clause->possibilities[$remove_var_id] as $type) {
// empty and !empty are not definitive for arrays and scalar types
if (($type === '!falsy' || $type === 'falsy') &&
($new_type->hasArray() || $new_type->hasPossiblyNumericType())
) {
$type_changed = true;
break;
}
$result_type = Reconciler::reconcileTypes(
$type,
clone $new_type,
null,
$statements_checker,
null,
[],
$failed_reconciliation
);
if ($result_type->getId() !== $new_type_string) {
$type_changed = true;
break;
}
}
if (!$type_changed) {
$clauses_to_keep[] = $clause;
}
}
}
return $clauses_to_keep;
}
/**
* @param string $remove_var_id
* @param Union|null $new_type
* @param null|StatementsChecker $statements_checker
*
* @return void
*/
public function removeVarFromConflictingClauses(
$remove_var_id,
Union $new_type = null,
StatementsChecker $statements_checker = null
) {
$this->clauses = self::filterClauses($remove_var_id, $this->clauses, $new_type, $statements_checker);
if ($this->parent_context) {
$this->parent_context->removeVarFromConflictingClauses($remove_var_id);
}
}
/**
* @param string $remove_var_id
* @param \Psalm\Type\Union|null $existing_type
* @param \Psalm\Type\Union|null $new_type
* @param null|StatementsChecker $statements_checker
*
* @return void
*/
public function removeDescendents(
$remove_var_id,
Union $existing_type = null,
Union $new_type = null,
StatementsChecker $statements_checker = null
) {
if (!$existing_type && isset($this->vars_in_scope[$remove_var_id])) {
$existing_type = $this->vars_in_scope[$remove_var_id];
}
if (!$existing_type) {
return;
}
if ($this->clauses) {
$this->removeVarFromConflictingClauses(
$remove_var_id,
$existing_type->isMixed()
|| ($new_type && $existing_type->from_docblock !== $new_type->from_docblock)
? null
: $new_type,
$statements_checker
);
}
$vars_to_remove = [];
foreach ($this->vars_in_scope as $var_id => $_) {
if (preg_match('/' . preg_quote($remove_var_id, DIRECTORY_SEPARATOR) . '[\]\[\-]/', $var_id)) {
$vars_to_remove[] = $var_id;
}
}
foreach ($vars_to_remove as $var_id) {
unset($this->vars_in_scope[$var_id]);
}
}
/**
* @return void
*/
public function removeAllObjectVars()
{
$vars_to_remove = [];
foreach ($this->vars_in_scope as $var_id => $_) {
if (strpos($var_id, '->') !== false || strpos($var_id, '::') !== false) {
$vars_to_remove[] = $var_id;
}
}
if (!$vars_to_remove) {
return;
}
foreach ($vars_to_remove as $var_id) {
unset($this->vars_in_scope[$var_id], $this->vars_possibly_in_scope[$var_id]);
}
$clauses_to_keep = [];
foreach ($this->clauses as $clause) {
$abandon_clause = false;
foreach (array_keys($clause->possibilities) as $key) {
if (strpos($key, '->') !== false || strpos($key, '::') !== false) {
$abandon_clause = true;
break;
}
}
if (!$abandon_clause) {
$clauses_to_keep[] = $clause;
}
}
$this->clauses = $clauses_to_keep;
}
/**
* @param Context $op_context
*
* @return void
*/
public function updateChecks(Context $op_context)
{
$this->check_classes = $this->check_classes && $op_context->check_classes;
$this->check_variables = $this->check_variables && $op_context->check_variables;
$this->check_methods = $this->check_methods && $op_context->check_methods;
$this->check_functions = $this->check_functions && $op_context->check_functions;
$this->check_consts = $this->check_consts && $op_context->check_consts;
}
/**
* @param string $class_name
*
* @return bool
*/
public function isPhantomClass($class_name)
{
return isset($this->phantom_classes[strtolower($class_name)]);
}
/**
* @param string $class_name
*
* @return void
*/
public function addPhantomClass($class_name)
{
$this->phantom_classes[strtolower($class_name)] = true;
}
/**
* @return array<string, bool>
*/
public function getPhantomClasses()
{
return $this->phantom_classes;
}
/**
* @param string|null $var_name
*
* @return bool
*/
public function hasVariable($var_name, StatementsChecker $statements_checker = null)
{
if (!$var_name ||
(!isset($this->vars_possibly_in_scope[$var_name]) &&
!isset($this->vars_in_scope[$var_name]))
) {
return false;
}
$stripped_var = preg_replace('/(->|\[).*$/', '', $var_name);
if ($stripped_var[0] === '$' && $stripped_var !== '$this') {
$this->referenced_var_ids[$var_name] = true;
if ($this->collect_references && $statements_checker) {
if (isset($this->unreferenced_vars[$var_name])) {
$statements_checker->registerVariableUses($this->unreferenced_vars[$var_name]);
}
unset($this->unreferenced_vars[$var_name]);
}
}
return isset($this->vars_in_scope[$var_name]);
}
}
| 1 | 7,012 | I was seeing `Warning: preg_match(): Unknown modifier 'a' in /path/to/project/vendor/vimeo/psalm/src/Psalm/Context.php on line 480` here | vimeo-psalm | php |
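For context, a small reproduction of why the delimiter argument matters; the variable id is illustrative, not taken from the original report. `preg_quote()` only escapes the pattern delimiter when it is passed explicitly, so a `/` inside the id ends the pattern early and the next character is parsed as a modifier.

```php
<?php
$var_id = '$arr[\'a/b\']'; // illustrative value containing the "/" delimiter

// Without the delimiter argument the "/" stays unescaped, the pattern closes at
// that "/", and PHP warns about an unknown modifier (here it would be 'b').
preg_match('/' . preg_quote($var_id) . '[\]\[\-]/', '$arr');

// With the delimiter passed, "/" is escaped as "\/" and the pattern stays intact.
preg_match('/' . preg_quote($var_id, '/') . '[\]\[\-]/', '$arr');
```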
@@ -1,10 +1,18 @@
package com.github.javaparser.ast.stmt;
+import com.github.javaparser.JavaParser;
+import com.github.javaparser.ParserConfiguration;
+import com.github.javaparser.StaticJavaParser;
import com.github.javaparser.ast.expr.Expression;
import com.github.javaparser.ast.expr.MethodCallExpr;
import com.github.javaparser.ast.expr.NameExpr;
+import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter;
import org.junit.jupiter.api.Test;
+import static com.github.javaparser.StaticJavaParser.parse;
+import static com.github.javaparser.utils.TestUtils.assertEqualsNoEol;
+import static com.github.javaparser.utils.Utils.EOL;
+
class BlockStmtTest {
@Test
void issue748AddingIdenticalStatementsDoesParentingRight() { | 1 | package com.github.javaparser.ast.stmt;
import com.github.javaparser.ast.expr.Expression;
import com.github.javaparser.ast.expr.MethodCallExpr;
import com.github.javaparser.ast.expr.NameExpr;
import org.junit.jupiter.api.Test;
class BlockStmtTest {
@Test
void issue748AddingIdenticalStatementsDoesParentingRight() {
BlockStmt blockStmt = new BlockStmt();
Expression exp = new NameExpr("x");
MethodCallExpr expression = new MethodCallExpr(exp, "y");
blockStmt.addStatement(expression);
blockStmt.addStatement(expression.clone());
// This fails when the issue exists:
String s = blockStmt.toString();
}
}
| 1 | 13,353 | The changes in this file are not necessary. | javaparser-javaparser | java |
@@ -1,6 +1,7 @@
package manager
import (
+ "github.com/kubeedge/kubeedge/pkg/apiserverlite/util"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/klog/v2" | 1 | package manager
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/klog/v2"
)
// Manager define the interface of a Manager, configmapManager and podManager implement it
type Manager interface {
Events() chan watch.Event
}
// CommonResourceEventHandler can be used by configmapManager and podManager
type CommonResourceEventHandler struct {
events chan watch.Event
}
func (c *CommonResourceEventHandler) obj2Event(t watch.EventType, obj interface{}) {
eventObj, ok := obj.(runtime.Object)
if !ok {
klog.Warningf("unknown type: %T, ignore", obj)
return
}
c.events <- watch.Event{Type: t, Object: eventObj}
}
// OnAdd handle Add event
func (c *CommonResourceEventHandler) OnAdd(obj interface{}) {
c.obj2Event(watch.Added, obj)
}
// OnUpdate handle Update event
func (c *CommonResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
c.obj2Event(watch.Modified, newObj)
}
// OnDelete handle Delete event
func (c *CommonResourceEventHandler) OnDelete(obj interface{}) {
c.obj2Event(watch.Deleted, obj)
}
// NewCommonResourceEventHandler create CommonResourceEventHandler used by configmapManager and podManager
func NewCommonResourceEventHandler(events chan watch.Event) *CommonResourceEventHandler {
return &CommonResourceEventHandler{events: events}
}
| 1 | 20,472 | run `make lint` to fix | kubeedge-kubeedge | go |
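Assuming the lint failure is about import grouping (the project-local import was added above the third-party block), the fix `make lint` applies would look roughly like the following; the exact grouping is whatever the repository's goimports/linter configuration enforces.

```go
import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/klog/v2"

	"github.com/kubeedge/kubeedge/pkg/apiserverlite/util"
)
```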
@@ -0,0 +1,7 @@
+from dagster import execute_pipeline
+from docs_snippets.guides.dagster.reexecution.pipeline.unreliable_pipeline import unreliable_pipeline
+
+
+def test_pipeline_compiles_and_executes():
+ result = execute_pipeline(unreliable_pipeline)
+ assert result | 1 | 1 | 14,392 | we will need `assert result.success` instead. result will always be not null because it returns an execution result including several metadata - when the execution fails, it'd return an execution result whose `success` attribute is false. | dagster-io-dagster | py |
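Applying the reviewer's point, the test would assert on the result's `success` flag; `execute_pipeline` returns a result object even when the run fails, so a bare truthiness check can never fail.

```python
from dagster import execute_pipeline
from docs_snippets.guides.dagster.reexecution.pipeline.unreliable_pipeline import unreliable_pipeline


def test_pipeline_compiles_and_executes():
    result = execute_pipeline(unreliable_pipeline)
    # The result object is always truthy; success reflects the actual run outcome.
    assert result.success
```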
|
@@ -52,6 +52,8 @@ type CStorVolumeReplica struct {
type CStorVolumeReplicaSpec struct {
TargetIP string `json:"targetIP"`
Capacity string `json:"capacity"`
+ // ZvolWorkers represents number of threads that executes client IOs
+ ZvolWorkers string `json:"zvolWorkers"`
}
// CStorVolumeReplicaPhase is to hold result of action. | 1 | /*
Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CVRKey represents the properties of a cstorvolumereplica
type CVRKey string
const (
// CloneEnableKEY is used to enable/disable cloning for a cstorvolumereplica
CloneEnableKEY CVRKey = "openebs.io/cloned"
// SourceVolumeKey stores the name of source volume whose snapshot is used to
// create this cvr
SourceVolumeKey CVRKey = "openebs.io/source-volume"
// SnapshotNameKey stores the name of the snapshot being used to restore this replica
SnapshotNameKey CVRKey = "openebs.io/snapshot"
)
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=cstorvolumereplica
// CStorVolumeReplica describes a cstor volume resource created as custom resource
type CStorVolumeReplica struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CStorVolumeReplicaSpec `json:"spec"`
Status CStorVolumeReplicaStatus `json:"status"`
}
// CStorVolumeReplicaSpec is the spec for a CStorVolumeReplica resource
type CStorVolumeReplicaSpec struct {
TargetIP string `json:"targetIP"`
Capacity string `json:"capacity"`
}
// CStorVolumeReplicaPhase is to hold result of action.
type CStorVolumeReplicaPhase string
// Status written onto CStorVolumeReplica objects.
const (
// CVRStatusEmpty ensures the create operation is to be done, if import fails.
CVRStatusEmpty CStorVolumeReplicaPhase = ""
// CVRStatusOnline ensures the resource is available.
CVRStatusOnline CStorVolumeReplicaPhase = "Healthy"
// CVRStatusOffline ensures the resource is not available.
CVRStatusOffline CStorVolumeReplicaPhase = "Offline"
// CVRStatusDegraded means that the rebuilding has not yet started.
CVRStatusDegraded CStorVolumeReplicaPhase = "Degraded"
// CVRStatusRebuilding means that the volume is in re-building phase.
CVRStatusRebuilding CStorVolumeReplicaPhase = "Rebuilding"
// CVRStatusRebuilding means that the volume status could not be found.
CVRStatusError CStorVolumeReplicaPhase = "Error"
// CVRStatusDeletionFailed ensures the resource deletion has failed.
CVRStatusDeletionFailed CStorVolumeReplicaPhase = "Error"
// CVRStatusInvalid ensures invalid resource.
CVRStatusInvalid CStorVolumeReplicaPhase = "Invalid"
// CVRStatusErrorDuplicate ensures error due to duplicate resource.
CVRStatusErrorDuplicate CStorVolumeReplicaPhase = "Invalid"
// CVRStatusInit ensures Init task of cvr resource.
CVRStatusInit CStorVolumeReplicaPhase = "Init"
// CVRStatusRecreate ensures recreation task of cvr resource.
CVRStatusRecreate CStorVolumeReplicaPhase = "Recreate"
)
// CStorVolumeReplicaStatus is for handling status of cvr.
type CStorVolumeReplicaStatus struct {
Phase CStorVolumeReplicaPhase `json:"phase"`
Capacity CStorVolumeCapacityAttr `json:"capacity"`
}
// CStorVolumeCapacityAttr is for storing the volume capacity.
type CStorVolumeCapacityAttr struct {
TotalAllocated string `json:"totalAllocated"`
Used string `json:"used"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=cstorvolumereplicas
// CStorVolumeReplicaList is a list of CStorVolumeReplica resources
type CStorVolumeReplicaList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []CStorVolumeReplica `json:"items"`
}
 | 1 | 11,776 | Can we mention how one can determine the best value for a given infrastructure? Does it default to any value? | openebs-maya | go
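One way to act on this comment is to expand the field's doc comment so the answer lives next to the field; the wording below is only a placeholder until the maintainers confirm what the effective default is.

```go
// ZvolWorkers represents the number of threads that execute client IOs.
// TODO(docs): state what the effective default is when this field is left
// empty, and how to size it (for example, relative to the CPU available to
// the pool pod and the expected IO parallelism of the workload).
ZvolWorkers string `json:"zvolWorkers"`
```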
@@ -1,4 +1,6 @@
-<?php namespace Backend\Widgets;
+<?php
+
+namespace Backend\Widgets;
use Lang;
use Backend\Classes\WidgetBase; | 1 | <?php namespace Backend\Widgets;
use Lang;
use Backend\Classes\WidgetBase;
/**
* Search Widget
* Used for building a toolbar, Renders a search container.
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class Search extends WidgetBase
{
//
// Configurable properties
//
/**
* @var string Search placeholder text.
*/
public $prompt;
/**
* @var bool Field show grow when selected.
*/
public $growable = true;
/**
* @var string Custom partial file definition, in context of the controller.
*/
public $partial;
/**
* @var string Defines the search mode. Commonly passed to the searchWhere() query.
*/
public $mode;
/**
* @var string Custom scope method name. Commonly passed to the query.
*/
public $scope;
//
// Object properties
//
/**
* @inheritDoc
*/
protected $defaultAlias = 'search';
/**
* @var string Active search term pulled from session data.
*/
protected $activeTerm;
/**
* @var array List of CSS classes to apply to the list container element.
*/
public $cssClasses = [];
/**
* Initialize the widget, called by the constructor and free from its parameters.
*/
public function init()
{
$this->fillFromConfig([
'prompt',
'partial',
'growable',
'scope',
'mode',
]);
/*
* Add CSS class styles
*/
$this->cssClasses[] = 'icon search';
if ($this->growable) {
$this->cssClasses[] = 'growable';
}
}
/**
* Renders the widget.
*/
public function render()
{
$this->prepareVars();
if ($this->partial) {
return $this->controller->makePartial($this->partial);
}
else {
return $this->makePartial('search');
}
}
/**
* Prepares the view data
*/
public function prepareVars()
{
$this->vars['cssClasses'] = implode(' ', $this->cssClasses);
$this->vars['placeholder'] = Lang::get($this->prompt);
$this->vars['value'] = $this->getActiveTerm();
}
/**
* Search field has been submitted.
*/
public function onSubmit()
{
/*
* Save or reset search term in session
*/
$this->setActiveTerm(post($this->getName()));
/*
* Trigger class event, merge results as viewable array
*/
$params = func_get_args();
$result = $this->fireEvent('search.submit', [$params]);
if ($result && is_array($result)) {
return call_user_func_array('array_merge', $result);
}
}
/**
* Returns an active search term for this widget instance.
*/
public function getActiveTerm()
{
return $this->activeTerm = $this->getSession('term', '');
}
/**
* Sets an active search term for this widget instance.
*/
public function setActiveTerm($term)
{
if (strlen($term)) {
$this->putSession('term', $term);
}
else {
$this->resetSession();
}
$this->activeTerm = $term;
}
/**
* Returns a value suitable for the field name property.
* @return string
*/
public function getName()
{
return $this->alias . '[term]';
}
}
| 1 | 13,243 | Leave this formatting the way it was please, that's the standard across all of October's files. | octobercms-october | php |
@@ -7,7 +7,7 @@ import (
"strings"
"github.com/opencontainers/runc/libcontainer/configs"
- "github.com/opencontainers/runc/libcontainer/selinux"
+ selinux "github.com/opencontainers/selinux/go-selinux"
)
type Validator interface { | 1 | package validate
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/selinux"
)
type Validator interface {
Validate(*configs.Config) error
}
func New() Validator {
return &ConfigValidator{}
}
type ConfigValidator struct {
}
func (v *ConfigValidator) Validate(config *configs.Config) error {
if err := v.rootfs(config); err != nil {
return err
}
if err := v.network(config); err != nil {
return err
}
if err := v.hostname(config); err != nil {
return err
}
if err := v.security(config); err != nil {
return err
}
if err := v.usernamespace(config); err != nil {
return err
}
if err := v.sysctl(config); err != nil {
return err
}
return nil
}
// rootfs validates if the rootfs is an absolute path and is not a symlink
// to the container's root filesystem.
func (v *ConfigValidator) rootfs(config *configs.Config) error {
if _, err := os.Stat(config.Rootfs); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("rootfs (%s) does not exist", config.Rootfs)
}
return err
}
cleaned, err := filepath.Abs(config.Rootfs)
if err != nil {
return err
}
if cleaned, err = filepath.EvalSymlinks(cleaned); err != nil {
return err
}
if filepath.Clean(config.Rootfs) != cleaned {
return fmt.Errorf("%s is not an absolute path or is a symlink", config.Rootfs)
}
return nil
}
func (v *ConfigValidator) network(config *configs.Config) error {
if !config.Namespaces.Contains(configs.NEWNET) {
if len(config.Networks) > 0 || len(config.Routes) > 0 {
return fmt.Errorf("unable to apply network settings without a private NET namespace")
}
}
return nil
}
func (v *ConfigValidator) hostname(config *configs.Config) error {
if config.Hostname != "" && !config.Namespaces.Contains(configs.NEWUTS) {
return fmt.Errorf("unable to set hostname without a private UTS namespace")
}
return nil
}
func (v *ConfigValidator) security(config *configs.Config) error {
// restrict sys without mount namespace
if (len(config.MaskPaths) > 0 || len(config.ReadonlyPaths) > 0) &&
!config.Namespaces.Contains(configs.NEWNS) {
return fmt.Errorf("unable to restrict sys entries without a private MNT namespace")
}
if config.ProcessLabel != "" && !selinux.SelinuxEnabled() {
return fmt.Errorf("selinux label is specified in config, but selinux is disabled or not supported")
}
return nil
}
func (v *ConfigValidator) usernamespace(config *configs.Config) error {
if config.Namespaces.Contains(configs.NEWUSER) {
if _, err := os.Stat("/proc/self/ns/user"); os.IsNotExist(err) {
return fmt.Errorf("USER namespaces aren't enabled in the kernel")
}
} else {
if config.UidMappings != nil || config.GidMappings != nil {
return fmt.Errorf("User namespace mappings specified, but USER namespace isn't enabled in the config")
}
}
return nil
}
// sysctl validates that the specified sysctl keys are valid or not.
// /proc/sys isn't completely namespaced and depending on which namespaces
// are specified, a subset of sysctls are permitted.
func (v *ConfigValidator) sysctl(config *configs.Config) error {
validSysctlMap := map[string]bool{
"kernel.msgmax": true,
"kernel.msgmnb": true,
"kernel.msgmni": true,
"kernel.sem": true,
"kernel.shmall": true,
"kernel.shmmax": true,
"kernel.shmmni": true,
"kernel.shm_rmid_forced": true,
}
for s := range config.Sysctl {
if validSysctlMap[s] || strings.HasPrefix(s, "fs.mqueue.") {
if config.Namespaces.Contains(configs.NEWIPC) {
continue
} else {
return fmt.Errorf("sysctl %q is not allowed in the hosts ipc namespace", s)
}
}
if strings.HasPrefix(s, "net.") {
if config.Namespaces.Contains(configs.NEWNET) {
if path := config.Namespaces.PathOf(configs.NEWNET); path != "" {
if err := checkHostNs(s, path); err != nil {
return err
}
}
continue
} else {
return fmt.Errorf("sysctl %q is not allowed in the hosts network namespace", s)
}
}
return fmt.Errorf("sysctl %q is not in a separate kernel namespace", s)
}
return nil
}
func isSymbolicLink(path string) (bool, error) {
fi, err := os.Lstat(path)
if err != nil {
return false, err
}
return fi.Mode()&os.ModeSymlink == os.ModeSymlink, nil
}
// checkHostNs checks whether network sysctl is used in host namespace.
func checkHostNs(sysctlConfig string, path string) error {
var currentProcessNetns = "/proc/self/ns/net"
// readlink on the current processes network namespace
destOfCurrentProcess, err := os.Readlink(currentProcessNetns)
if err != nil {
return fmt.Errorf("read soft link %q error", currentProcessNetns)
}
// First check if the provided path is a symbolic link
symLink, err := isSymbolicLink(path)
if err != nil {
return fmt.Errorf("could not check that %q is a symlink: %v", path, err)
}
if symLink == false {
// The provided namespace is not a symbolic link,
// it is not the host namespace.
return nil
}
// readlink on the path provided in the struct
destOfContainer, err := os.Readlink(path)
if err != nil {
return fmt.Errorf("read soft link %q error", path)
}
if destOfContainer == destOfCurrentProcess {
return fmt.Errorf("sysctl %q is not allowed in the hosts network namespace", sysctlConfig)
}
return nil
}
 | 1 | 14,371 | @runcom says there is no need to use the `selinux` alias here (`selinux "github.com/opencontainers/selinux/go-selinux"`); the code should work fine without it. | opencontainers-runc | go
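For context, the `go-selinux` package already declares `package selinux`, so the explicit import alias is redundant; a sketch of the unaliased form, assuming no repository linter insists on naming imports whose path ends in a different element:

```go
import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/opencontainers/runc/libcontainer/configs"
	"github.com/opencontainers/selinux/go-selinux"
)
```

Call sites such as `selinux.SelinuxEnabled()` are unchanged, because the package name is `selinux` either way.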
@@ -20,8 +20,8 @@ CREATE_PROJECT_TABLE = """
`project_number` bigint(20) NOT NULL,
`project_id` varchar(255) NOT NULL,
`project_name` varchar(255) DEFAULT NULL,
- `lifecycle_state` enum('ACTIVE','DELETE_REQUESTED',
- 'DELETE_IN_PROGRESS','DELETED') DEFAULT NULL,
+ `lifecycle_state` enum('LIFECYCLE_STATE_UNSPECIFIED','ACTIVE',
+ 'DELETE_REQUESTED','DELETED') NOT NULL,
`parent_type` varchar(255) DEFAULT NULL,
`parent_id` varchar(255) DEFAULT NULL,
`raw_project` json DEFAULT NULL, | 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQL queries to create Cloud SQL tables."""
CREATE_PROJECT_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) NOT NULL,
`project_id` varchar(255) NOT NULL,
`project_name` varchar(255) DEFAULT NULL,
`lifecycle_state` enum('ACTIVE','DELETE_REQUESTED',
'DELETE_IN_PROGRESS','DELETED') DEFAULT NULL,
`parent_type` varchar(255) DEFAULT NULL,
`parent_id` varchar(255) DEFAULT NULL,
`raw_project` json DEFAULT NULL,
`create_time` datetime DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `project_id_UNIQUE` (`project_id`),
UNIQUE KEY `project_number_UNIQUE` (`project_number`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_PROJECT_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_PROJECT_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`project_number` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_ORG_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`org_id` bigint(20) DEFAULT NULL,
`role` varchar(255) DEFAULT NULL,
`member_type` varchar(255) DEFAULT NULL,
`member_name` varchar(255) DEFAULT NULL,
`member_domain` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
CREATE_RAW_ORG_IAM_POLICIES_TABLE = """
CREATE TABLE `{0}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
`org_id` bigint(20) DEFAULT NULL,
`iam_policy` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
| 1 | 25,612 | nit: It might be worth defining these elsewhere as a python-type? | forseti-security-forseti-security | py |
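A sketch of what "defining these elsewhere as a Python type" could look like; the class name and placement are hypothetical, only the enum literals come from the schema above.

```python
class LifecycleState(object):
    """Project lifecycle states used by the inventory schema."""
    UNSPECIFIED = 'LIFECYCLE_STATE_UNSPECIFIED'
    ACTIVE = 'ACTIVE'
    DELETE_REQUESTED = 'DELETE_REQUESTED'
    DELETED = 'DELETED'

    ALL = (UNSPECIFIED, ACTIVE, DELETE_REQUESTED, DELETED)


# The CREATE TABLE string could then build its enum from one source of truth:
LIFECYCLE_ENUM_SQL = "enum('{0}')".format("','".join(LifecycleState.ALL))
```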
@@ -151,6 +151,9 @@ public class Constants {
* implies save latest 3 versions saved in storage.
**/
public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention";
+
+ // enable Quartz Scheduler if true.
+ public static final String ENABLE_QUARTZ= "enable.quartz";
}
public static class FlowProperties { | 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban;
/**
* Constants used in configuration files or shared among classes.
*
* <p>Conventions:
*
* <p>Internal constants to be put in the {@link Constants} class
*
* <p>Configuration keys to be put in the {@link ConfigurationKeys} class
*
* <p>Flow level properties keys to be put in the {@link FlowProperties} class
*
* <p>Job level Properties keys to be put in the {@link JobProperties} class
*/
public class Constants {
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port";
public static final String AZKABAN_EXECUTOR_PORT_FILE = "executor.portfile";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
// Internal username used to perform SLA action
public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla";
// Memory check retry interval when OOM in ms
public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1;
// Max number of memory check retry
public static final int MEMORY_CHECK_RETRY_LIMIT = 720;
public static final int DEFAULT_PORT_NUMBER = 8081;
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
// One Schedule's default End Time: 01/01/2050, 00:00:00, UTC
public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L;
public static class ConfigurationKeys {
// These properties are configurable through azkaban.properties
public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename";
// Defines a list of external links, each referred to as a topic
public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics";
// External URL template of a given topic, specified in the list defined above
public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url";
// Designates one of the external link topics to correspond to an execution analyzer
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label";
// Designates one of the external link topics to correspond to a job log viewer
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label";
// Configures the Kafka appender for logging user jobs, specified for the exec server
public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";
// Represent the class name of azkaban metrics reporter.
public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name";
// Represent the metrics server URL.
public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url";
public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled";
// User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users.
// enduser -> myazkabanhost:443 -> proxy -> localhost:8081
// when this parameters set then these parameters are used to generate email links.
// if these parameters are not set then jetty.hostname, and jetty.port(if ssl configured jetty.ssl.port) are used.
public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname";
public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port";
public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port";
// Hostname for the host, if not specified, canonical hostname will be used
public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname";
// List of users we prevent azkaban from running flows as. (ie: root, azkaban)
public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users";
// Path name of execute-as-user executable
public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib";
// Name of *nix group associated with the process running Azkaban
public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name";
// Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs.
// The property is used for the web server to get the host name of the executor when running in SOLO mode.
public static final String EXECUTOR_HOST = "executor.host";
// Max flow running time in mins, server will kill flows running longer than this setting.
// if not set or <= 0, then there's no restriction on running time.
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes";
public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type";
public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir";
public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path";
public static final String AZKABAN_STORAGE_HDFS_ROOT_URI = "azkaban.storage.hdfs.root.uri";
public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal";
public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path";
public static final String PROJECT_TEMP_DIR = "project.temp.dir";
// Event reporting properties
public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM =
"azkaban.event.reporting.class";
public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS =
"azkaban.event.reporting.kafka.brokers";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC =
"azkaban.event.reporting.kafka.topic";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL =
"azkaban.event.reporting.kafka.schema.registry.url";
/*
* The max number of artifacts retained per project.
* Accepted Values:
* - 0 : Save all artifacts. No clean up is done on storage.
* - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage
*
* Note: Having an unacceptable value results in an exception and the service would REFUSE
* to start.
*
* Example:
* a) azkaban.storage.artifact.max.retention=all
* implies save all artifacts
* b) azkaban.storage.artifact.max.retention=3
* implies save latest 3 versions saved in storage.
**/
public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention";
}
public static class FlowProperties {
// Basic properties of flows as set by the executor server
public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname";
public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid";
public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser";
public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion";
}
public static class JobProperties {
// Job property that enables/disables using Kafka logging of user job logs
public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable";
}
public static class JobCallbackProperties {
public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout";
public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout";
public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout";
public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout";
public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size";
}
}
| 1 | 14,949 | prefix it with azkaban. Maybe azkaban.server.schedule.enable_quartz? | azkaban-azkaban | java |
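A minimal sketch of the rename the reviewer floats; the key name is their proposal, not a settled constant.

```java
// enable Quartz Scheduler if true.
public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz";
```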
@@ -145,6 +145,13 @@ module Bolt
end
rescue Bolt::Error => e
e
+ rescue Puppet::DataBinding::LookupError => e
+ if /Undefined variable/.match(e.message)
+ message = "Interpolations are not supported in lookups outside of an apply block: #{e.message}"
+ PALError.new(message)
+ else
+ PALError.from_preformatted_error(e)
+ end
rescue Puppet::PreformattedError => e
PALError.from_preformatted_error(e)
rescue StandardError => e | 1 | # frozen_string_literal: true
require 'bolt/applicator'
require 'bolt/executor'
require 'bolt/error'
require 'bolt/plan_result'
require 'bolt/util'
require 'etc'
module Bolt
class PAL
BOLTLIB_PATH = File.expand_path('../../bolt-modules', __dir__)
MODULES_PATH = File.expand_path('../../modules', __dir__)
# PALError is used to convert errors from executing puppet code into
# Bolt::Errors
class PALError < Bolt::Error
# Puppet sometimes rescues exceptions notes the location and reraises.
# Return the original error.
def self.from_preformatted_error(err)
if err.cause&.is_a? Bolt::Error
err.cause
else
from_error(err.cause || err)
end
end
# Generate a Bolt::Pal::PALError for non-bolt errors
def self.from_error(err)
e = new(err.message)
e.set_backtrace(err.backtrace)
e
end
def initialize(msg)
super(msg, 'bolt/pal-error')
end
end
attr_reader :modulepath
def initialize(modulepath, hiera_config, resource_types, max_compiles = Etc.nprocessors,
trusted_external = nil, apply_settings = {})
# Nothing works without initialized this global state. Reinitializing
# is safe and in practice only happens in tests
self.class.load_puppet
@original_modulepath = modulepath
@modulepath = [BOLTLIB_PATH, *modulepath, MODULES_PATH]
@hiera_config = hiera_config
@trusted_external = trusted_external
@apply_settings = apply_settings
@max_compiles = max_compiles
@resource_types = resource_types
@logger = Logging.logger[self]
if modulepath && !modulepath.empty?
@logger.info("Loading modules from #{@modulepath.join(File::PATH_SEPARATOR)}")
end
@loaded = false
end
# Puppet logging is global so this is class method to avoid confusion
def self.configure_logging
Puppet::Util::Log.destinations.clear
Puppet::Util::Log.newdestination(Logging.logger['Puppet'])
# Defer all log level decisions to the Logging library by telling Puppet
# to log everything
Puppet.settings[:log_level] = 'debug'
end
def self.load_puppet
if Bolt::Util.windows?
# Windows 'fix' for openssl behaving strangely. Prevents very slow operation
# of random_bytes later when establishing winrm connections from a Windows host.
# See https://github.com/rails/rails/issues/25805 for background.
require 'openssl'
OpenSSL::Random.random_bytes(1)
end
begin
require 'puppet_pal'
rescue LoadError
raise Bolt::Error.new("Puppet must be installed to execute tasks", "bolt/puppet-missing")
end
require 'bolt/pal/logging'
require 'bolt/pal/issues'
require 'bolt/pal/yaml_plan/loader'
require 'bolt/pal/yaml_plan/transpiler'
# Now that puppet is loaded we can include puppet mixins in data types
Bolt::ResultSet.include_iterable
end
def setup
unless @loaded
# This is slow so don't do it until we have to
Bolt::PAL.load_puppet
# Make sure we don't create the puppet directories
with_puppet_settings { |_| nil }
@loaded = true
end
end
# Create a top-level alias for TargetSpec and PlanResult so that users don't have to
# namespace it with Boltlib, which is just an implementation detail. This
# allows them to feel like a built-in type in bolt, rather than
# something has been, no pun intended, "bolted on".
def alias_types(compiler)
compiler.evaluate_string('type TargetSpec = Boltlib::TargetSpec')
compiler.evaluate_string('type PlanResult = Boltlib::PlanResult')
end
# Register all resource types defined in $Project/.resource_types as well as
# the built in types registered with the runtime_3_init method.
def register_resource_types(loaders)
static_loader = loaders.static_loader
static_loader.runtime_3_init
if File.directory?(@resource_types)
Dir.children(@resource_types).each do |resource_pp|
type_name_from_file = File.basename(resource_pp, '.pp').capitalize
typed_name = Puppet::Pops::Loader::TypedName.new(:type, type_name_from_file)
resource_type = Puppet::Pops::Types::TypeFactory.resource(type_name_from_file)
loaders.static_loader.set_entry(typed_name, resource_type)
end
end
end
# Runs a block in a PAL script compiler configured for Bolt. Catches
# exceptions thrown by the block and re-raises them ensuring they are
# Bolt::Errors since the script compiler block will squash all exceptions.
def in_bolt_compiler
# TODO: If we always call this inside a bolt_executor we can remove this here
setup
r = Puppet::Pal.in_tmp_environment('bolt', modulepath: @modulepath, facts: {}) do |pal|
pal.with_script_compiler do |compiler|
alias_types(compiler)
register_resource_types(Puppet.lookup(:loaders)) if @resource_types
begin
Puppet.override(yaml_plan_instantiator: Bolt::PAL::YamlPlan::Loader) do
yield compiler
end
rescue Bolt::Error => e
e
rescue Puppet::PreformattedError => e
PALError.from_preformatted_error(e)
rescue StandardError => e
PALError.from_preformatted_error(e)
end
end
end
# Plans may return PuppetError but nothing should be throwing them
if r.is_a?(StandardError) && !r.is_a?(Bolt::PuppetError)
raise r
end
r
end
def with_bolt_executor(executor, inventory, pdb_client = nil, applicator = nil, &block)
setup
opts = {
bolt_executor: executor,
bolt_inventory: inventory,
bolt_pdb_client: pdb_client,
apply_executor: applicator || Applicator.new(
inventory,
executor,
@modulepath,
# Skip syncing built-in plugins, since we vendor some Puppet 6
# versions of "core" types, which are already present on the agent,
# but may cause issues on Puppet 5 agents.
@original_modulepath,
pdb_client,
@hiera_config,
@max_compiles,
@apply_settings
)
}
Puppet.override(opts, &block)
end
def in_plan_compiler(executor, inventory, pdb_client, applicator = nil)
with_bolt_executor(executor, inventory, pdb_client, applicator) do
# TODO: remove this call and see if anything breaks when
# settings dirs don't actually exist. Plans shouldn't
# actually be using them.
with_puppet_settings do
in_bolt_compiler do |compiler|
yield compiler
end
end
end
end
def in_task_compiler(executor, inventory)
with_bolt_executor(executor, inventory) do
in_bolt_compiler do |compiler|
yield compiler
end
end
end
# TODO: PUP-8553 should replace this
def with_puppet_settings
Dir.mktmpdir('bolt') do |dir|
cli = []
Puppet::Settings::REQUIRED_APP_SETTINGS.each do |setting|
cli << "--#{setting}" << dir
end
Puppet.settings.send(:clear_everything_for_tests)
Puppet.initialize_settings(cli)
Puppet::GettextConfig.create_default_text_domain
Puppet[:trusted_external_command] = @trusted_external
self.class.configure_logging
yield
end
end
# Parses a snippet of Puppet manifest code and returns the AST represented
# in JSON.
def parse_manifest(code, filename)
setup
Puppet::Pops::Parser::EvaluatingParser.new.parse_string(code, filename)
rescue Puppet::Error => e
raise Bolt::PAL::PALError, "Failed to parse manifest: #{e}"
end
def list_tasks
in_bolt_compiler do |compiler|
tasks = compiler.list_tasks
tasks.map(&:name).sort.each_with_object([]) do |task_name, data|
task_sig = compiler.task_signature(task_name)
unless task_sig.task_hash['metadata']['private']
data << [task_name, task_sig.task_hash['metadata']['description']]
end
end
end
end
def list_modulepath
@modulepath - [BOLTLIB_PATH, MODULES_PATH]
end
def parse_params(type, object_name, params)
in_bolt_compiler do |compiler|
if type == 'task'
param_spec = compiler.task_signature(object_name)&.task_hash&.dig('parameters')
elsif type == 'plan'
plan = compiler.plan_signature(object_name)
param_spec = plan.params_type.elements&.each_with_object({}) { |t, h| h[t.name] = t.value_type } if plan
end
param_spec ||= {}
params.each_with_object({}) do |(name, str), acc|
type = param_spec[name]
begin
parsed = JSON.parse(str, quirks_mode: true)
# The type may not exist if the module is remote on orch or if a task
# defines no parameters. Since we treat no parameters as Any we
# should parse everything in this case
acc[name] = if type && !type.instance?(parsed)
str
else
parsed
end
rescue JSON::ParserError
# This value may not be assignable in which case run_* will error
acc[name] = str
end
acc
end
end
end
def task_signature(task_name)
in_bolt_compiler do |compiler|
compiler.task_signature(task_name)
end
end
def get_task(task_name)
task = task_signature(task_name)
if task.nil?
raise Bolt::Error.unknown_task(task_name)
end
Bolt::Task.from_task_signature(task)
end
def list_plans
in_bolt_compiler do |compiler|
errors = []
plans = compiler.list_plans(nil, errors).map { |plan| [plan.name] }.sort
errors.each do |error|
@logger.warn(error.details['original_error'])
end
plans
end
end
def get_plan_info(plan_name)
plan_sig = in_bolt_compiler do |compiler|
compiler.plan_signature(plan_name)
end
if plan_sig.nil?
raise Bolt::Error.unknown_plan(plan_name)
end
mod = plan_sig.instance_variable_get(:@plan_func).loader.parent.path
# If it's a Puppet language plan, use strings to extract data. The only
# way to tell is to check which filename exists in the module.
plan_subpath = File.join(plan_name.split('::').drop(1))
plan_subpath = 'init' if plan_subpath.empty?
pp_path = File.join(mod, 'plans', "#{plan_subpath}.pp")
if File.exist?(pp_path)
require 'puppet-strings'
require 'puppet-strings/yard'
PuppetStrings::Yard.setup!
YARD::Logger.instance.level = :error
YARD.parse(pp_path)
plan = YARD::Registry.at("puppet_plans::#{plan_name}")
description = if plan.tag(:summary)
plan.tag(:summary).text
elsif !plan.docstring.empty?
plan.docstring
end
defaults = plan.parameters.reject { |_, value| value.nil? }.to_h
signature_params = Set.new(plan.parameters.map(&:first))
parameters = plan.tags(:param).each_with_object({}) do |param, params|
name = param.name
if signature_params.include?(name)
params[name] = { 'type' => param.types.first }
params[name]['default_value'] = defaults[name] if defaults.key?(name)
params[name]['description'] = param.text unless param.text.empty?
else
@logger.warn("The documented parameter '#{name}' does not exist in plan signature")
end
end
{
'name' => plan_name,
'description' => description,
'parameters' => parameters,
'module' => mod
}
# If it's a YAML plan, fall back to limited data
else
yaml_path = File.join(mod, 'plans', "#{plan_subpath}.yaml")
plan_content = File.read(yaml_path)
plan = Bolt::PAL::YamlPlan::Loader.from_string(plan_name, plan_content, yaml_path)
parameters = plan.parameters.each_with_object({}) do |param, params|
name = param.name
type_str = case param.type_expr
when Puppet::Pops::Types::PTypeReferenceType
param.type_expr.type_string
when nil
'Any'
else
param.type_expr
end
params[name] = { 'type' => type_str }
params[name]['default_value'] = param.value
params[name]['description'] = param.description if param.description
end
{
'name' => plan_name,
'description' => plan.description,
'parameters' => parameters,
'module' => mod
}
end
end
def convert_plan(plan_path)
Puppet[:tasks] = true
transpiler = YamlPlan::Transpiler.new
transpiler.transpile(plan_path)
end
# Returns a mapping of all modules available to the Bolt compiler
#
# @return [Hash{String => Array<Hash{Symbol => String,nil}>}]
# A hash that associates each directory on the module path with an array
# containing a hash of information for each module in that directory.
# The information hash provides the name, version, and a string
# indicating whether the module belongs to an internal module group.
def list_modules
internal_module_groups = { BOLTLIB_PATH => 'Plan Language Modules',
MODULES_PATH => 'Packaged Modules' }
in_bolt_compiler do
# NOTE: Can replace map+to_h with transform_values when Ruby 2.4
# is the minimum supported version.
Puppet.lookup(:current_environment).modules_by_path.map do |path, modules|
module_group = internal_module_groups[path]
values = modules.map do |mod|
mod_info = { name: (mod.forge_name || mod.name),
version: mod.version }
mod_info[:internal_module_group] = module_group unless module_group.nil?
mod_info
end
[path, values]
end.to_h
end
end
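# Illustrative shape of the hash returned by list_modules above; the paths,
# module names, and versions here are hypothetical and only demonstrate the
# structure described in the docstring:
#
#   {
#     "/project/Boltdir/modules" => [
#       { name: "puppetlabs-stdlib", version: "6.0.0" },
#       { name: "mymod", version: nil }
#     ],
#     BOLTLIB_PATH => [
#       { name: "boltlib", version: nil, internal_module_group: "Plan Language Modules" }
#     ]
#   }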
def generate_types
require 'puppet/face/generate'
in_bolt_compiler do
generator = Puppet::Generate::Type
inputs = generator.find_inputs(:pcore)
FileUtils.mkdir_p(@resource_types)
generator.generate(inputs, @resource_types, true)
end
end
def run_task(task_name, targets, params, executor, inventory, description = nil)
in_task_compiler(executor, inventory) do |compiler|
params = params.merge('_bolt_api_call' => true, '_catch_errors' => true)
compiler.call_function('run_task', task_name, targets, description, params)
end
end
def run_plan(plan_name, params, executor = nil, inventory = nil, pdb_client = nil, applicator = nil)
in_plan_compiler(executor, inventory, pdb_client, applicator) do |compiler|
r = compiler.call_function('run_plan', plan_name, params.merge('_bolt_api_call' => true))
Bolt::PlanResult.from_pcore(r, 'success')
end
rescue Bolt::Error => e
Bolt::PlanResult.new(e, 'failure')
end
end
end
| 1 | 14,216 | It'd be nice if we also caught `Unknown variable $trusted / $server_facts / $settings::` here - while I highly doubt anyone is using them (and if they are they're surely misusing them) it'd be crazy-making to have a working plan suddenly start failing with no clue as to why. | puppetlabs-bolt | rb |
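A rough sketch of what the suggestion above could look like in `list_plans`, assuming (as the surrounding code does) that each compiler error exposes its message via `error.details['original_error']`; the regex and the warning text are illustrative only, not Bolt's actual behaviour.

```ruby
# Hypothetical helper: flag references to variables that are not available in
# plans, so a previously working plan does not start failing with no clue why.
PLAN_UNSAFE_VARS = /Unknown variable: '(trusted|server_facts|settings::[^']*)'/.freeze

def warn_on_plan_unsafe_vars(errors)
  errors.each do |error|
    message = error.details['original_error']
    @logger.warn(message)
    if message =~ PLAN_UNSAFE_VARS
      @logger.warn("Plan references $#{Regexp.last_match(1)}, which is not available in plans")
    end
  end
end
```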
@@ -967,6 +967,11 @@ type serviceInfo struct {
onlyNodeLocalEndpoints bool
}
+// TopologyKeys is part of ServicePort interface.
+func (info *serviceInfo) TopologyKeys() []string {
+ panic("NOT IMPLEMENTED")
+}
+
// String is part of ServicePort interface.
func (info *serviceInfo) String() string {
return fmt.Sprintf("%s:%d/%s", info.clusterIP, info.port, info.protocol) | 1 | // Copyright (c) 2017-2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"fmt"
"net"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
k8sp "k8s.io/kubernetes/pkg/proxy"
"github.com/projectcalico/felix/bpf"
"github.com/projectcalico/felix/bpf/nat"
"github.com/projectcalico/felix/bpf/routes"
"github.com/projectcalico/felix/ip"
)
var podNPIP = net.IPv4(255, 255, 255, 255)
type svcInfo struct {
id uint32
count int
localCount int
svc k8sp.ServicePort
}
type svcKey struct {
sname k8sp.ServicePortName
extra string
}
func (k svcKey) String() string {
if k.extra == "" {
return k.sname.String()
}
return fmt.Sprintf("%s:%s", k.extra, k.sname)
}
func getSvcKey(sname k8sp.ServicePortName, extra string) svcKey {
return svcKey{
sname: sname,
extra: extra,
}
}
type svcType int
const (
svcTypeExternalIP svcType = iota
svcTypeNodePort
svcTypeNodePortRemote
)
var svcType2String = map[svcType]string{
svcTypeNodePort: "NodePort",
svcTypeExternalIP: "ExternalIP",
svcTypeNodePortRemote: "NodePortRemote",
}
func getSvcKeyExtra(t svcType, ip string) string {
return svcType2String[t] + ":" + ip
}
func hasSvcKeyExtra(skey svcKey, t svcType) bool {
return strings.HasPrefix(skey.extra, svcType2String[t]+":")
}
func isSvcKeyDerived(skey svcKey) bool {
return hasSvcKeyExtra(skey, svcTypeExternalIP) || hasSvcKeyExtra(skey, svcTypeNodePort)
}
type stickyFrontend struct {
id uint32
timeo time.Duration
}
// Syncer is an implementation of DPSyncer interface. It is not thread safe and
// should be called only once at a time
type Syncer struct {
bpfSvcs bpf.Map
bpfEps bpf.Map
bpfAff bpf.Map
nextSvcID uint32
nodePortIPs []net.IP
rt Routes
// new maps are valid during the Apply()'s runtime to provide easy access
// to updating them. They become prev at the end of it to be compared
// against in the next iteration
newSvcMap map[svcKey]svcInfo
newEpsMap k8sp.EndpointsMap
prevSvcMap map[svcKey]svcInfo
prevEpsMap k8sp.EndpointsMap
// We never have more than one thread accessing the [prev|new][Svc|Eps]Map,
// this is to just make sure and to make the --race checker happy
mapsLck sync.Mutex
// synced is true after reconciling the first Apply
synced bool
// origs are deallocated after the first Apply reconciles
origSvcs nat.MapMem
origEps nat.BackendMapMem
expFixupWg sync.WaitGroup
expFixupStop chan struct{}
stop chan struct{}
stopOnce sync.Once
stickySvcs map[nat.FrontendKey]stickyFrontend
stickyEps map[uint32]map[nat.BackendValue]struct{}
stickySvcDeleted bool
}
func uniqueIPs(ips []net.IP) []net.IP {
m := make(map[string]net.IP)
unique := true
for _, ip := range ips {
s := ip.String()
if _, ok := m[s]; ok {
unique = false
} else {
m[s] = ip
}
}
if unique {
return ips
}
ret := make([]net.IP, 0, len(m))
for _, ip := range m {
ret = append(ret, ip)
}
return ret
}
// NewSyncer returns a new Syncer that uses the 2 provided maps
func NewSyncer(nodePortIPs []net.IP, svcsmap, epsmap, affmap bpf.Map, rt Routes) (*Syncer, error) {
s := &Syncer{
bpfSvcs: svcsmap,
bpfEps: epsmap,
bpfAff: affmap,
rt: rt,
nodePortIPs: uniqueIPs(nodePortIPs),
prevSvcMap: make(map[svcKey]svcInfo),
prevEpsMap: make(k8sp.EndpointsMap),
stop: make(chan struct{}),
}
if err := s.loadOrigs(); err != nil {
return nil, err
}
return s, nil
}
func (s *Syncer) loadOrigs() error {
svcs, err := nat.LoadFrontendMap(s.bpfSvcs)
if err != nil {
return err
}
eps, err := nat.LoadBackendMap(s.bpfEps)
if err != nil {
return err
}
s.origSvcs = svcs
s.origEps = eps
return nil
}
func (s *Syncer) startupSync(state DPSyncerState) error {
for svck, svcv := range s.origSvcs {
svckey := s.matchBpfSvc(svck, state.SvcMap)
if svckey == nil {
continue
}
id := svcv.ID()
count := int(svcv.Count())
s.prevSvcMap[*svckey] = svcInfo{
id: id,
count: count,
localCount: int(svcv.LocalCount()),
svc: state.SvcMap[svckey.sname],
}
delete(s.origSvcs, svck)
if id >= s.nextSvcID {
s.nextSvcID = id + 1
}
if svckey.extra != "" {
continue
}
for i := 0; i < count; i++ {
epk := nat.NewNATBackendKey(id, uint32(i))
ep, ok := s.origEps[epk]
if !ok {
log.Debugf("s.origSvcs = %+v\n", s.origSvcs)
log.Debugf("s.origEps = %+v\n", s.origEps)
return errors.Errorf("inconsistent backed map, missing ep %s", epk)
}
s.prevEpsMap[svckey.sname] = append(s.prevEpsMap[svckey.sname],
&k8sp.BaseEndpointInfo{
Endpoint: net.JoinHostPort(ep.Addr().String(), strconv.Itoa(int(ep.Port()))),
// IsLocal is not important here
})
delete(s.origEps, epk)
}
}
for k := range s.origSvcs {
log.Debugf("removing stale %s", k)
if err := s.bpfSvcs.Delete(k[:]); err != nil {
return errors.Errorf("bpfSvcs.Delete: %s", err)
}
}
for k := range s.origEps {
log.Debugf("removing stale %s", k)
if err := s.bpfEps.Delete(k[:]); err != nil {
return errors.Errorf("bpfEps.Delete: %s", err)
}
}
return nil
}
func (s *Syncer) cleanupDerived(id uint32) error {
// also delete all derived
for _, si := range s.prevSvcMap {
if si.id == id {
key, err := getSvcNATKey(si.svc)
if err != nil {
return err
}
log.Debugf("bpf map deleting derived %s:%s", key, nat.NewNATValue(id, 0, 0, 0))
if err := s.bpfSvcs.Delete(key[:]); err != nil {
return errors.Errorf("bpfSvcs.Delete: %s", err)
}
}
}
return nil
}
func (s *Syncer) applySvc(skey svcKey, sinfo k8sp.ServicePort, eps []k8sp.Endpoint,
cleanupDerived func(uint32) error) error {
var (
err error
id uint32
count int
local int
)
old, exists := s.prevSvcMap[skey]
if exists {
if old.svc == sinfo {
id = old.id
count, local, err = s.updateExistingSvc(skey.sname, sinfo, id, old.count, eps)
} else {
if err := s.deleteSvc(old.svc, old.id, old.count); err != nil {
return err
}
delete(s.prevSvcMap, skey)
if cleanupDerived != nil {
if err := cleanupDerived(old.id); err != nil {
return errors.WithMessage(err, "cleanupDerived")
}
}
exists = false
}
}
if !exists {
id = s.newSvcID()
count, local, err = s.newSvc(skey.sname, sinfo, id, eps)
}
if err != nil {
return err
}
s.newSvcMap[skey] = svcInfo{
id: id,
count: count,
localCount: local,
svc: sinfo,
}
log.Debugf("applied a service %s update: sinfo=%+v", skey, s.newSvcMap[skey])
return nil
}
func (s *Syncer) applyExpandedNP(sname k8sp.ServicePortName, sinfo k8sp.ServicePort,
eps []k8sp.Endpoint, node ip.V4Addr, nport int) error {
skey := getSvcKey(sname, getSvcKeyExtra(svcTypeNodePortRemote, node.String()))
si := serviceInfoFromK8sServicePort(sinfo)
si.clusterIP = node.AsNetIP()
si.port = nport
if err := s.applySvc(skey, si, eps, nil); err != nil {
return errors.Errorf("apply NodePortRemote for %s node %s", sname, node)
}
return nil
}
type expandMiss struct {
sname k8sp.ServicePortName
sinfo k8sp.ServicePort
eps []k8sp.Endpoint
nport int
}
func (s *Syncer) expandNodePorts(sname k8sp.ServicePortName, sinfo k8sp.ServicePort,
eps []k8sp.Endpoint, nport int, rtLookup func(addr ip.Addr) (routes.Value, bool)) *expandMiss {
m := make(map[ip.V4Addr][]k8sp.Endpoint)
var miss *expandMiss
for _, ep := range eps {
ipv4 := ip.FromString(ep.IP()).(ip.V4Addr)
rt, ok := rtLookup(ipv4)
if !ok {
log.Errorf("No route for %s", ipv4)
if miss == nil {
miss = &expandMiss{
sname: sname,
sinfo: sinfo,
nport: nport,
}
}
miss.eps = append(miss.eps, ep)
continue
}
nodeIP := rt.NextHop().(ip.V4Addr)
log.Debugf("found rt %s for dest %s", nodeIP, ipv4)
m[nodeIP] = append(m[nodeIP], ep)
}
for node, neps := range m {
if err := s.applyExpandedNP(sname, sinfo, neps, node, nport); err != nil {
log.WithField("error", err).Errorf("Failed to expand NodePort")
}
}
return miss
}
func (s *Syncer) applyDerived(sname k8sp.ServicePortName, t svcType, sinfo k8sp.ServicePort) error {
svc, ok := s.newSvcMap[getSvcKey(sname, "")]
if !ok {
// this should not happen
return errors.Errorf("no ClusterIP for derived service type %d", t)
}
var skey svcKey
count := svc.count
local := svc.localCount
skey = getSvcKey(sname, getSvcKeyExtra(t, sinfo.ClusterIP().String()))
switch t {
case svcTypeNodePort:
if sinfo.OnlyNodeLocalEndpoints() {
count = local // use only local eps
}
}
newInfo := svcInfo{
id: svc.id,
count: count,
localCount: local,
svc: sinfo,
}
if oldInfo, ok := s.prevSvcMap[skey]; !ok || oldInfo != newInfo {
if err := s.writeSvc(sinfo, svc.id, count, local); err != nil {
return err
}
}
s.newSvcMap[skey] = newInfo
log.Debugf("applied a derived service %s update: sinfo=%+v", skey, s.newSvcMap[skey])
return nil
}
func (s *Syncer) apply(state DPSyncerState) error {
log.Debugf("applying new state")
// we need to copy the maps from the new state to compute the diff in the
// next call. We cannot keep the provided maps as the generic k8s proxy code
// updates them. This function is called with a lock held so we are safe
// here and now.
s.newSvcMap = make(map[svcKey]svcInfo)
s.newEpsMap = make(k8sp.EndpointsMap)
var expNPMisses []*expandMiss
// insert or update existing services
for sname, sinfo := range state.SvcMap {
skey := getSvcKey(sname, "")
eps := state.EpsMap[sname]
if err := s.applySvc(skey, sinfo, eps, s.cleanupDerived); err != nil {
return err
}
// N.B. we assume that k8s provides us with no duplicates
for _, extIP := range sinfo.ExternalIPStrings() {
extInfo := serviceInfoFromK8sServicePort(sinfo)
extInfo.clusterIP = net.ParseIP(extIP)
err := s.applyDerived(sname, svcTypeExternalIP, extInfo)
if err != nil {
log.Errorf("failed to apply ExternalIP %s for service %s : %s", extIP, sname, err)
continue
}
}
if nport := sinfo.NodePort(); nport != 0 {
for _, npip := range s.nodePortIPs {
npInfo := serviceInfoFromK8sServicePort(sinfo)
npInfo.clusterIP = npip
npInfo.port = nport
if npip.Equal(podNPIP) && sinfo.OnlyNodeLocalEndpoints() {
// do not program the meta entry, program each node
// separately
continue
}
err := s.applyDerived(sname, svcTypeNodePort, npInfo)
if err != nil {
log.Errorf("failed to apply NodePort %s for service %s : %s", npip, sname, err)
continue
}
}
if sinfo.OnlyNodeLocalEndpoints() {
if miss := s.expandNodePorts(sname, sinfo, eps, nport, s.rt.Lookup); miss != nil {
expNPMisses = append(expNPMisses, miss)
}
}
}
}
// delete services that do not exist anymore now that we added new nodeports
// and external ips
for skey, sinfo := range s.prevSvcMap {
if _, ok := s.newSvcMap[skey]; ok {
continue
}
count := sinfo.count
if isSvcKeyDerived(skey) {
// do not delete backends if only deleting a service derived from a
// ClusterIP, that is ExternalIP or NodePort
count = 0
log.Debugf("deleting derived svc %s", skey)
}
if err := s.deleteSvc(sinfo.svc, sinfo.id, count); err != nil {
return err
}
if sinfo.svc.SessionAffinityType() == v1.ServiceAffinityClientIP {
s.stickySvcDeleted = true
}
log.Infof("removed stale service %q", skey)
}
log.Debugf("new state written")
s.runExpandNPFixup(expNPMisses)
return nil
}
// Apply applies the new state
func (s *Syncer) Apply(state DPSyncerState) error {
if !s.synced {
log.Infof("Syncing k8s state and bpf maps after start")
if err := s.startupSync(state); err != nil {
return errors.WithMessage(err, "startup sync")
}
s.synced = true
// deallocate, no further use
s.origSvcs = nil
s.origEps = nil
log.Infof("Startup sync complete")
} else {
// if we were not synced yet, the fixer cannot run yet
s.stopExpandNPFixup()
s.prevSvcMap = s.newSvcMap
s.prevEpsMap = s.newEpsMap
}
// preallocate maps that track sticky services for cleanup
s.stickySvcs = make(map[nat.FrontendKey]stickyFrontend)
s.stickyEps = make(map[uint32]map[nat.BackendValue]struct{})
s.stickySvcDeleted = false
defer func() {
// not needed anymore
s.stickySvcs = nil
s.stickyEps = nil
}()
s.mapsLck.Lock()
defer s.mapsLck.Unlock()
if err := s.apply(state); err != nil {
// don't bother to clean up affinity since we do not know in what state we
// are anyway. Will get resolved once we get in a good state
return err
}
// We wrote all updates; no one will create new records in the affinity table
// that we would clean up now, so do it!
return s.cleanupSticky()
}
func (s *Syncer) updateExistingSvc(sname k8sp.ServicePortName, sinfo k8sp.ServicePort, id uint32,
oldCount int, eps []k8sp.Endpoint) (int, int, error) {
// We only need to delete old entries when the number of backends shrinks;
// otherwise all the key:value pairs are rewritten/updated below anyway.
if oldCount > len(eps) {
for i := 0; i < oldCount; i++ {
if err := s.deleteSvcBackend(id, uint32(i)); err != nil {
return 0, 0, err
}
}
}
return s.newSvc(sname, sinfo, id, eps)
}
func (s *Syncer) newSvc(sname k8sp.ServicePortName, sinfo k8sp.ServicePort, id uint32,
eps []k8sp.Endpoint) (int, int, error) {
cpEps := make([]k8sp.Endpoint, 0, len(eps))
cnt := 0
local := 0
if sinfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
// since we write the backend before we write the frontend, we need to
// preallocate the map for it
s.stickyEps[id] = make(map[nat.BackendValue]struct{})
}
for _, ep := range eps {
if !ep.GetIsLocal() {
continue
}
if err := s.writeSvcBackend(id, uint32(cnt), ep); err != nil {
return 0, 0, err
}
cpEps = append(cpEps, ep)
cnt++
local++
}
for _, ep := range eps {
if ep.GetIsLocal() {
continue
}
if err := s.writeSvcBackend(id, uint32(cnt), ep); err != nil {
return 0, 0, err
}
cpEps = append(cpEps, ep)
cnt++
}
if err := s.writeSvc(sinfo, id, cnt, local); err != nil {
return 0, 0, err
}
s.newEpsMap[sname] = cpEps
return cnt, local, nil
}
func (s *Syncer) writeSvcBackend(svcID uint32, idx uint32, ep k8sp.Endpoint) error {
ip := net.ParseIP(ep.IP())
key := nat.NewNATBackendKey(svcID, uint32(idx))
tgtPort, err := ep.Port()
if err != nil {
return errors.Errorf("no port for endpoint %q: %s", ep, err)
}
val := nat.NewNATBackendValue(ip, uint16(tgtPort))
log.Debugf("bpf map writing %s:%s", key, val)
if err := s.bpfEps.Update(key[:], val[:]); err != nil {
return errors.Errorf("bpfEps.Update: %s", err)
}
if s.stickyEps[svcID] != nil {
s.stickyEps[svcID][val] = struct{}{}
}
return nil
}
func (s *Syncer) deleteSvcBackend(svcID uint32, idx uint32) error {
key := nat.NewNATBackendKey(svcID, uint32(idx))
log.Debugf("bpf map deleting %s", key)
if err := s.bpfEps.Delete(key[:]); err != nil {
return errors.Errorf("bpfEps.Delete: %s", err)
}
return nil
}
func getSvcNATKey(svc k8sp.ServicePort) (nat.FrontendKey, error) {
ip := svc.ClusterIP()
port := svc.Port()
proto, err := protoV1ToInt(svc.Protocol())
if err != nil {
return nat.FrontendKey{}, err
}
key := nat.NewNATKey(ip, uint16(port), proto)
return key, nil
}
func (s *Syncer) writeSvc(svc k8sp.ServicePort, svcID uint32, count, local int) error {
key, err := getSvcNATKey(svc)
if err != nil {
return err
}
affinityTimeo := uint32(0)
if svc.SessionAffinityType() == v1.ServiceAffinityClientIP {
affinityTimeo = uint32(svc.StickyMaxAgeSeconds())
}
val := nat.NewNATValue(svcID, uint32(count), uint32(local), affinityTimeo)
log.Debugf("bpf map writing %s:%s", key, val)
if err := s.bpfSvcs.Update(key[:], val[:]); err != nil {
return errors.Errorf("bpfSvcs.Update: %s", err)
}
// we must have written the backends by now so the map exists
if s.stickyEps[svcID] != nil {
s.stickySvcs[key] = stickyFrontend{
id: svcID,
timeo: time.Duration(affinityTimeo) * time.Second,
}
}
return nil
}
func (s *Syncer) deleteSvc(svc k8sp.ServicePort, svcID uint32, count int) error {
for i := 0; i < count; i++ {
if err := s.deleteSvcBackend(svcID, uint32(i)); err != nil {
return err
}
}
key, err := getSvcNATKey(svc)
if err != nil {
return err
}
log.Debugf("bpf map deleting %s:%s", key, nat.NewNATValue(svcID, uint32(count), 0, 0))
if err := s.bpfSvcs.Delete(key[:]); err != nil {
return errors.Errorf("bpfSvcs.Delete: %s", err)
}
return nil
}
func protoV1ToInt(p v1.Protocol) (uint8, error) {
switch p {
case v1.ProtocolTCP:
return 6, nil
case v1.ProtocolUDP:
return 17, nil
case v1.ProtocolSCTP:
return 132, nil
}
return 0, errors.Errorf("unknown protocol %q", p)
}
// ProtoV1ToIntPanic translates k8s v1.Protocol to its IANA number and panics if
// the protocol is not recognized
func ProtoV1ToIntPanic(p v1.Protocol) uint8 {
pn, err := protoV1ToInt(p)
if err != nil {
panic(err)
}
return pn
}
func (s *Syncer) newSvcID() uint32 {
// TODO we may run out of IDs unless we restart or recycle
id := s.nextSvcID
s.nextSvcID++
return id
}
func (s *Syncer) matchBpfSvc(bsvc nat.FrontendKey, svcs k8sp.ServiceMap) *svcKey {
for svc, info := range svcs {
if bsvc.Proto() != ProtoV1ToIntPanic(info.Protocol()) {
continue
}
matchNP := func() *svcKey {
if bsvc.Port() == uint16(info.NodePort()) {
for _, nip := range s.nodePortIPs {
if bsvc.Addr().String() == nip.String() {
skey := &svcKey{
sname: svc,
extra: getSvcKeyExtra(svcTypeNodePort, nip.String()),
}
log.Debugf("resolved %s as %s", bsvc, skey)
return skey
}
}
}
return nil
}
if bsvc.Port() != uint16(info.Port()) {
if sk := matchNP(); sk != nil {
return sk
}
continue
}
if bsvc.Addr().String() == info.ClusterIP().String() {
skey := &svcKey{
sname: svc,
}
log.Debugf("resolved %s as %s", bsvc, skey)
return skey
}
for _, eip := range info.ExternalIPStrings() {
if bsvc.Addr().String() == eip {
skey := &svcKey{
sname: svc,
extra: getSvcKeyExtra(svcTypeExternalIP, eip),
}
log.Debugf("resolved %s as %s", bsvc, skey)
return skey
}
}
// just in case the NodePort port is the same as the Port
if sk := matchNP(); sk != nil {
return sk
}
}
return nil
}
func (s *Syncer) runExpandNPFixup(misses []*expandMiss) {
s.expFixupStop = make(chan struct{})
if len(misses) == 0 {
return
}
s.expFixupWg.Add(1)
// start the fixer routine and exit
go func() {
log.Debug("fixer started")
defer s.expFixupWg.Done()
defer log.Debug("fixer exited")
s.mapsLck.Lock()
defer s.mapsLck.Unlock()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// monitor if we should stop and if so, cancel any work
go func() {
select {
case <-s.stop:
cancel()
case <-s.expFixupStop:
cancel()
case <-ctx.Done():
// do nothing, we exited, work is done, just quit
}
}()
for {
log.Debugf("%d misses unresolved", len(misses))
// We do one pass right away since we cannot know whether there
// was an update or not before we got here
s.rt.WaitAfter(ctx, func(lookup func(addr ip.Addr) (routes.Value, bool)) bool {
var again []*expandMiss
for _, m := range misses {
if miss := s.expandNodePorts(m.sname, m.sinfo, m.eps, m.nport, lookup); miss != nil {
again = append(again, miss)
}
}
misses = again
return len(misses) == 0 // block or not block
})
if len(misses) == 0 || ctx.Err() != nil {
return
}
}
}()
}
func (s *Syncer) stopExpandNPFixup() {
close(s.expFixupStop)
s.expFixupWg.Wait()
}
// Stop stops the syncer
func (s *Syncer) Stop() {
s.stopOnce.Do(func() {
close(s.stop)
s.expFixupWg.Wait()
})
}
func (s *Syncer) cleanupSticky() error {
// no sticky service was updated, there cannot be any stale affinity entries
// to clean up
if len(s.stickySvcs) == 0 && !s.stickySvcDeleted {
return nil
}
var (
key nat.AffinityKey
val nat.AffinityValue
)
dels := make([]nat.AffinityKey, 0, 64)
ks := len(nat.AffinityKey{})
vs := len(nat.AffinityValue{})
now := time.Duration(bpf.KTimeNanos())
err := s.bpfAff.Iter(func(k, v []byte) {
copy(key[:], k[:ks])
copy(val[:], v[:vs])
fend, ok := s.stickySvcs[key.FrontendKey()]
if !ok {
log.Debugf("cleaning affinity %v:%v - no such a service", key, val)
dels = append(dels, key)
return
}
if _, ok := s.stickyEps[fend.id][val.Backend()]; !ok {
log.Debugf("cleaning affinity %v:%v - no such a backend", key, val)
dels = append(dels, key)
return
}
if now-val.Timestamp() > fend.timeo {
log.Debugf("cleaning affinity %v:%v - expired", key, val)
dels = append(dels, key)
return
}
log.Debugf("cleaning affinity %v:%v - keeping", key, val)
})
if err != nil {
return errors.Errorf("NAT affinity map iterator failed: %s", err)
}
errs := 0
for _, k := range dels {
if err := s.bpfAff.Delete(k.AsBytes()); err != nil {
log.WithField("key", k).Errorf("Failed to delete stale NAT affinity record")
errs++
}
}
if errs > 0 {
return errors.Errorf("encountered %d errors writing NAT affinity map", errs)
}
return nil
}
func serviceInfoFromK8sServicePort(sport k8sp.ServicePort) *serviceInfo {
sinfo := new(serviceInfo)
// create a shallow copy
sinfo.clusterIP = sport.ClusterIP()
sinfo.port = sport.Port()
sinfo.protocol = sport.Protocol()
sinfo.nodePort = sport.NodePort()
sinfo.sessionAffinityType = sport.SessionAffinityType()
sinfo.stickyMaxAgeSeconds = sport.StickyMaxAgeSeconds()
sinfo.externalIPs = sport.ExternalIPStrings()
sinfo.loadBalancerSourceRanges = sport.LoadBalancerSourceRanges()
sinfo.healthCheckNodePort = sport.HealthCheckNodePort()
sinfo.onlyNodeLocalEndpoints = sport.OnlyNodeLocalEndpoints()
return sinfo
}
type serviceInfo struct {
clusterIP net.IP
port int
protocol v1.Protocol
nodePort int
sessionAffinityType v1.ServiceAffinity
stickyMaxAgeSeconds int
externalIPs []string
loadBalancerSourceRanges []string
healthCheckNodePort int
onlyNodeLocalEndpoints bool
}
// String is part of ServicePort interface.
func (info *serviceInfo) String() string {
return fmt.Sprintf("%s:%d/%s", info.clusterIP, info.port, info.protocol)
}
// ClusterIP is part of ServicePort interface.
func (info *serviceInfo) ClusterIP() net.IP {
return info.clusterIP
}
// Port is part of ServicePort interface.
func (info *serviceInfo) Port() int {
return info.port
}
// SessionAffinityType is part of the ServicePort interface.
func (info *serviceInfo) SessionAffinityType() v1.ServiceAffinity {
return info.sessionAffinityType
}
// StickyMaxAgeSeconds is part of the ServicePort interface
func (info *serviceInfo) StickyMaxAgeSeconds() int {
return info.stickyMaxAgeSeconds
}
// Protocol is part of ServicePort interface.
func (info *serviceInfo) Protocol() v1.Protocol {
return info.protocol
}
// LoadBalancerSourceRanges is part of ServicePort interface
func (info *serviceInfo) LoadBalancerSourceRanges() []string {
return info.loadBalancerSourceRanges
}
// HealthCheckNodePort is part of ServicePort interface.
func (info *serviceInfo) HealthCheckNodePort() int {
return info.healthCheckNodePort
}
// NodePort is part of the ServicePort interface.
func (info *serviceInfo) NodePort() int {
return info.nodePort
}
// ExternalIPStrings is part of ServicePort interface.
func (info *serviceInfo) ExternalIPStrings() []string {
return info.externalIPs
}
// LoadBalancerIPStrings is part of ServicePort interface.
func (info *serviceInfo) LoadBalancerIPStrings() []string {
panic("NOT IMPLEMENTED")
}
// OnlyNodeLocalEndpoints is part of ServicePort interface.
func (info *serviceInfo) OnlyNodeLocalEndpoints() bool {
return info.onlyNodeLocalEndpoints
}
// K8sServicePortOption defines options for NewK8sServicePort
type K8sServicePortOption func(interface{})
// NewK8sServicePort creates a new k8s ServicePort
func NewK8sServicePort(clusterIP net.IP, port int, proto v1.Protocol,
opts ...K8sServicePortOption) k8sp.ServicePort {
x := &serviceInfo{
clusterIP: clusterIP,
port: port,
protocol: proto,
}
for _, o := range opts {
o(x)
}
return x
}
// K8sSvcWithExternalIPs sets ExternalIPs
func K8sSvcWithExternalIPs(ips []string) K8sServicePortOption {
return func(s interface{}) {
s.(*serviceInfo).externalIPs = ips
}
}
// K8sSvcWithNodePort sets the nodeport
func K8sSvcWithNodePort(np int) K8sServicePortOption {
return func(s interface{}) {
s.(*serviceInfo).nodePort = np
}
}
// K8sSvcWithLocalOnly sets OnlyNodeLocalEndpoints=true
func K8sSvcWithLocalOnly() K8sServicePortOption {
return func(s interface{}) {
s.(*serviceInfo).onlyNodeLocalEndpoints = true
}
}
// K8sSvcWithStickyClientIP sets ServiceAffinityClientIP to seconds
func K8sSvcWithStickyClientIP(seconds int) K8sServicePortOption {
return func(s interface{}) {
s.(*serviceInfo).stickyMaxAgeSeconds = seconds
s.(*serviceInfo).sessionAffinityType = v1.ServiceAffinityClientIP
}
}
| 1 | 17,884 | Probably need to understand/implement this one now. | projectcalico-felix | go |
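A hedged sketch of how the panicking TopologyKeys stub from the patch could be filled in, mirroring the way the other `serviceInfo` fields are shallow-copied in `serviceInfoFromK8sServicePort`; the `topologyKeys` field and the option helper below are assumptions for illustration, not the project's actual change.

```go
// Assumed extra field on serviceInfo:
//     topologyKeys []string
// copied in serviceInfoFromK8sServicePort like the other fields:
//     sinfo.topologyKeys = sport.TopologyKeys()

// TopologyKeys is part of ServicePort interface.
func (info *serviceInfo) TopologyKeys() []string {
	return info.topologyKeys
}

// K8sSvcWithTopologyKeys mirrors the existing K8sServicePortOption helpers so
// tests can construct a service with topology keys (hypothetical helper).
func K8sSvcWithTopologyKeys(keys []string) K8sServicePortOption {
	return func(s interface{}) {
		s.(*serviceInfo).topologyKeys = keys
	}
}
```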
@@ -91,8 +91,13 @@ public class JdbcDependencyManager {
while (rs.next()) {
// Columns are (starting at index 1): file_name, file_sha1, validation_status
Dependency d = hashAndFileNameToDep.remove(rs.getString(1) + rs.getString(2));
- FileValidationStatus v = FileValidationStatus.valueOf(rs.getInt(3));
- depValidationStatuses.put(d, v);
+
+ // HashMap.remove will return null if the key is not found, hence check for it before
+ // adding to depValidationStatuses.
+ if (d != null) {
+ FileValidationStatus v = FileValidationStatus.valueOf(rs.getInt(3));
+ depValidationStatuses.put(d, v);
+ }
}
// All remaining dependencies in the hashToDep map should be marked as being NEW (because they weren't | 1 | /*
* Copyright 2019 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.project;
import azkaban.db.DatabaseOperator;
import azkaban.spi.Dependency;
import azkaban.spi.FileValidationStatus;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.dbutils.DbUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Provides methods for interacting with dependency validation cache in DB. Used during thin archive
* uploads.
*/
@Singleton
public class JdbcDependencyManager {
private static final Logger log = LoggerFactory.getLogger(JdbcDependencyManager.class);
private final DatabaseOperator dbOperator;
@Inject
JdbcDependencyManager(final DatabaseOperator dbOperator) {
this.dbOperator = dbOperator;
}
public Map<Dependency, FileValidationStatus> getValidationStatuses(final Set<Dependency> deps,
final String validationKey) throws SQLException {
Map<Dependency, FileValidationStatus> depValidationStatuses = new HashMap<>();
if (deps.isEmpty()) {
// There's nothing for us to do.
return depValidationStatuses;
}
// Map of (filename + sha1) -> Dependency for resolving the dependencies already cached in the DB
// after the query completes.
Map<String, Dependency> hashAndFileNameToDep = new HashMap<>();
Connection conn = null;
ResultSet rs = null;
PreparedStatement stmnt = null;
// TODO: Use azkaban.db.DatabaseOperator.query() instead of getting the DB connection and
// dealing with connection lifecycle.
try {
conn = this.dbOperator.getDataSource().getConnection();
if (conn == null) {
throw new SQLException("Null connection");
}
stmnt = conn.prepareStatement(
String
.format("SELECT file_name, file_sha1, validation_status FROM validated_dependencies "
+ "WHERE validation_key = ? AND (%s)", makeStrWithQuestionMarks(deps.size())));
// Set the first param, which is the validation_key
stmnt.setString(1, validationKey);
// Start at 2 because the first parameter is at index 1, and that is the validator key that we already set.
int index = 2;
for (Dependency d : deps) {
stmnt.setString(index++, d.getFileName());
stmnt.setString(index++, d.getSHA1());
hashAndFileNameToDep.put(d.getFileName() + d.getSHA1(), d);
}
rs = stmnt.executeQuery();
while (rs.next()) {
// Columns are (starting at index 1): file_name, file_sha1, validation_status
Dependency d = hashAndFileNameToDep.remove(rs.getString(1) + rs.getString(2));
FileValidationStatus v = FileValidationStatus.valueOf(rs.getInt(3));
depValidationStatuses.put(d, v);
}
// All remaining dependencies in the hashToDep map should be marked as being NEW (because they weren't
// associated with any DB entry)
hashAndFileNameToDep.values().stream()
.forEach(d -> depValidationStatuses.put(d, FileValidationStatus.NEW));
} catch (final SQLException ex) {
log.error("Transaction failed: ", ex);
throw ex;
} finally {
// Replicate the order of closing in org.apache.commons.dbutils.QueryRunner#query
DbUtils.closeQuietly(conn, stmnt, rs);
}
return depValidationStatuses;
}
public void updateValidationStatuses(final Map<Dependency, FileValidationStatus> depValidationStatuses,
final String validationKey) throws SQLException {
if (depValidationStatuses.isEmpty()) {
return;
}
// Order of columns: file_name, file_sha1, validation_key, validation_status
Object[][] rowsToInsert = depValidationStatuses
.keySet()
.stream()
.map(d -> new Object[]{d.getFileName(), d.getSHA1(), validationKey, depValidationStatuses.get(d).getValue()})
.toArray(Object[][]::new);
// We use INSERT IGNORE because another process may have been processing the same dependency
// and written the row for a given dependency before we were able to (resulting in a duplicate primary key
// error when we try to write the row), so this will ignore the error and continue persisting the other
// dependencies.
this.dbOperator.batch("INSERT IGNORE INTO validated_dependencies "
+ "(file_name, file_sha1, validation_key, validation_status) VALUES (?, ?, ?, ?)", rowsToInsert);
}
private static String makeStrWithQuestionMarks(final int num) {
StringBuilder builder = new StringBuilder();
for(int i = 0; i < num; i++) {
builder.append("(file_name = ? and file_sha1 = ?) or ");
}
// Remove trailing " or ";
return builder.substring(0, builder.length() - 4);
}
}
| 1 | 19,750 | It would be better to add a containsKey check for the key in hashAndFileNameToDep instead of the null check here. All of this should go inside that contains-check if block. | azkaban-azkaban | java
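A small sketch of the reviewer's suggestion, keeping the surrounding method's result-set layout and variable names: branch on a containsKey lookup so only rows that match a requested dependency are processed.

```java
while (rs.next()) {
  // Columns are (starting at index 1): file_name, file_sha1, validation_status
  String key = rs.getString(1) + rs.getString(2);
  if (hashAndFileNameToDep.containsKey(key)) {
    Dependency d = hashAndFileNameToDep.remove(key);
    FileValidationStatus v = FileValidationStatus.valueOf(rs.getInt(3));
    depValidationStatuses.put(d, v);
  }
}
```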
@@ -300,8 +300,18 @@ module Beaker
new_conf
end
+ # Restarts the named puppet service
+ #
+ # @param [Host] host Host the service runs on
+ # @param [String] service Name of the service to restart
+ # @param [Fixnum] curl_retries Number of times to retry the restart command
+ # @param [Fixnum] port Port to check status at
+ #
+ # @return [Result] Result of last status check
# @!visibility private
- def bounce_service host, service, curl_retries = 120
+ def bounce_service host, service, curl_retries = nil, port = nil
+ curl_retries = 120 if curl_retries.nil?
+ port = options[:puppetserver_port] if port.nil?
if host.graceful_restarts?
apachectl_path = host.is_pe? ? "#{host['puppetsbindir']}/apache2ctl" : 'apache2ctl'
host.exec(Command.new("#{apachectl_path} graceful")) | 1 | require 'timeout'
require 'inifile'
require 'resolv'
module Beaker
module DSL
module Helpers
# Methods that help you interact with your puppet installation, puppet must be installed
# for these methods to execute correctly
module PuppetHelpers
# @!macro [new] common_opts
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :silent (false) Do not produce log output
# @option opts [Array<Fixnum>] :acceptable_exit_codes ([0]) An array
# (or range) of integer exit codes that should be considered
# acceptable. An error will be thrown if the exit code does not
# match one of the values in this list.
# @option opts [Boolean] :accept_all_exit_codes (false) Consider all
# exit codes as passing.
# @option opts [Boolean] :dry_run (false) Do not actually execute any
# commands on the SUT
# @option opts [String] :stdin (nil) Input to be provided during command
# execution on the SUT.
# @option opts [Boolean] :pty (false) Execute this command in a pseudoterminal.
# @option opts [Boolean] :expect_connection_failure (false) Expect this command
# to result in a connection failure, reconnect and continue execution.
# @option opts [Hash{String=>String}] :environment ({}) These will be
# treated as extra environment variables that should be set before
# running the command.
#
# Return the name of the puppet user.
#
# @param [Host] host One object that acts like a Beaker::Host
#
# @note This method assumes puppet is installed on the host.
#
def puppet_user(host)
return host.puppet('master')['user']
end
# Return the name of the puppet group.
#
# @param [Host] host One object that acts like a Beaker::Host
#
# @note This method assumes puppet is installed on the host.
#
def puppet_group(host)
return host.puppet('master')['group']
end
# Test Puppet running in a certain run mode with specific options.
# This ensures the following steps are performed:
# 1. The pre-test Puppet configuration is backed up
# 2. A new Puppet configuraton file is layed down
# 3. Puppet is started or restarted in the specified run mode
# 4. Ensure Puppet has started correctly
# 5. Further tests are yielded to
# 6. Revert Puppet to the pre-test state
# 7. Testing artifacts are saved in a folder named for the test
#
# @note Whether Puppet is started or restarted depends on what kind of
# server you're running. Passenger and puppetserver are restarted before.
# Webrick is started before and stopped after yielding, unless you're using
# service scripts, then it'll behave like passenger & puppetserver.
# Passenger and puppetserver (or webrick using service scripts)
# restart after yielding by default. You can stop this from happening
# by setting the :restart_when_done flag of the conf_opts argument.
#
# @param [Host] host One object that act like Host
#
# @param [Hash{Symbol=>String}] conf_opts Represents puppet settings.
# Sections of the puppet.conf may be
# specified, if no section is specified the
# a puppet.conf file will be written with the
# options put in a section named after [mode]
# @option conf_opts [String] :__commandline_args__ A special setting for
# command_line arguments such as --debug or
# --logdest, which cannot be set in
# puppet.conf. For example:
#
# :__commandline_args__ => '--logdest /tmp/a.log'
#
# These will only be applied when starting a FOSS
# master, as a pe master is just bounced.
# @option conf_opts [Hash] :__service_args__ A special setting of options
# for controlling how the puppet master service is
# handled. The only setting currently is
# :bypass_service_script, which if set true will
# force stopping and starting a webrick master
# using the start_puppet_from_source_* methods,
# even if it seems the host has passenger.
# This is needed in FOSS tests to initialize
# SSL.
# @option conf_opts [Boolean] :restart_when_done determines whether a restart
# should be run after the test has been yielded to.
# Will stop puppet if false. Default behavior
# is to restart, but you can override this on the
# host or with this option.
# (Note: only works for passenger & puppetserver
# masters (or webrick using the service scripts))
# @param [File] testdir The temporary directory which will hold backup
# configuration, and other test artifacts.
#
# @param [Block] block The point of this method, yields so
# tests may be ran. After the block is finished
# puppet will revert to a previous state.
#
# @example A simple use case to ensure a master is running
# with_puppet_running_on( master ) do
# ...tests that require a master...
# end
#
# @example Fully utilizing the possibilities of config options
# with_puppet_running_on( master,
# :main => {:logdest => '/var/blah'},
# :master => {:masterlog => '/elswhere'},
# :agent => {:server => 'localhost'} ) do
#
# ...tests to be ran...
# end
#
def with_puppet_running_on host, conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
raise(ArgumentError, "with_puppet_running_on's conf_opts must be a Hash. You provided a #{conf_opts.class}: '#{conf_opts}'") if !conf_opts.kind_of?(Hash)
cmdline_args = conf_opts[:__commandline_args__]
service_args = conf_opts[:__service_args__] || {}
restart_when_done = true
restart_when_done = host[:restart_when_done] if host.has_key?(:restart_when_done)
restart_when_done = conf_opts.fetch(:restart_when_done, restart_when_done)
conf_opts = conf_opts.reject { |k,v| [:__commandline_args__, :__service_args__, :restart_when_done].include?(k) }
curl_retries = host['master-start-curl-retries'] || options['master-start-curl-retries']
logger.debug "Setting curl retries to #{curl_retries}"
if options[:is_puppetserver]
confdir = host.puppet('master')['confdir']
vardir = host.puppet('master')['vardir']
if cmdline_args
split_args = cmdline_args.split()
split_args.each do |arg|
case arg
when /--confdir=(.*)/
confdir = $1
when /--vardir=(.*)/
vardir = $1
end
end
end
puppetserver_opts = { "jruby-puppet" => {
"master-conf-dir" => confdir,
"master-var-dir" => vardir,
}}
puppetserver_conf = File.join("#{host['puppetserver-confdir']}", "puppetserver.conf")
modify_tk_config(host, puppetserver_conf, puppetserver_opts)
end
begin
backup_file = backup_the_file(host, host.puppet('master')['confdir'], testdir, 'puppet.conf')
lay_down_new_puppet_conf host, conf_opts, testdir
if host.use_service_scripts? && !service_args[:bypass_service_script]
bounce_service( host, host['puppetservice'], curl_retries )
else
puppet_master_started = start_puppet_from_source_on!( host, cmdline_args )
end
yield self if block_given?
rescue Beaker::DSL::Assertions, Minitest::Assertion => early_assertion
fail_test(early_assertion)
rescue Exception => early_exception
original_exception = RuntimeError.new("PuppetAcceptance::DSL::Helpers.with_puppet_running_on failed (check backtrace for location) because: #{early_exception}\n#{early_exception.backtrace.join("\n")}\n")
raise(original_exception)
ensure
begin
if host.use_service_scripts? && !service_args[:bypass_service_script]
restore_puppet_conf_from_backup( host, backup_file )
if restart_when_done
bounce_service( host, host['puppetservice'], curl_retries )
else
host.exec puppet_resource('service', host['puppetservice'], 'ensure=stopped')
end
else
if puppet_master_started
stop_puppet_from_source_on( host )
else
dump_puppet_log(host)
end
restore_puppet_conf_from_backup( host, backup_file )
end
rescue Exception => teardown_exception
begin
if !host.is_pe?
dump_puppet_log(host)
end
rescue Exception => dumping_exception
logger.error("Raised during attempt to dump puppet logs: #{dumping_exception}")
end
if original_exception
logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n")
raise original_exception
else
raise teardown_exception
end
end
end
end
# Test Puppet running in a certain run mode with specific options,
# on the default host
# @see #with_puppet_running_on
def with_puppet_running conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
with_puppet_running_on(default, conf_opts, testdir, &block)
end
# @!visibility private
def restore_puppet_conf_from_backup( host, backup_file )
puppet_conf = host.puppet('master')['config']
if backup_file
host.exec( Command.new( "if [ -f '#{backup_file}' ]; then " +
"cat '#{backup_file}' > " +
"'#{puppet_conf}'; " +
"rm -f '#{backup_file}'; " +
"fi" ) )
else
host.exec( Command.new( "rm -f '#{puppet_conf}'" ))
end
end
# @!visibility private
def start_puppet_from_source_on! host, args = ''
host.exec( puppet( 'master', args ) )
logger.debug 'Waiting for the puppet master to start'
unless port_open_within?( host, 8140, 10 )
raise Beaker::DSL::FailTest, 'Puppet master did not start in a timely fashion'
end
logger.debug 'The puppet master has started'
return true
end
# @!visibility private
def stop_puppet_from_source_on( host )
pid = host.exec( Command.new('cat `puppet master --configprint pidfile`') ).stdout.chomp
host.exec( Command.new( "kill #{pid}" ) )
Timeout.timeout(10) do
while host.exec( Command.new( "kill -0 #{pid}"), :acceptable_exit_codes => [0,1] ).exit_code == 0 do
# until kill -0 finds no process and we know that puppet has finished cleaning up
sleep 1
end
end
end
# @!visibility private
def dump_puppet_log(host)
syslogfile = case host['platform']
when /fedora|centos|el|redhat|scientific/ then '/var/log/messages'
when /ubuntu|debian|cumulus/ then '/var/log/syslog'
else return
end
logger.notify "\n*************************"
logger.notify "* Dumping master log *"
logger.notify "*************************"
host.exec( Command.new( "tail -n 100 #{syslogfile}" ), :acceptable_exit_codes => [0,1])
logger.notify "*************************\n"
end
# @!visibility private
def lay_down_new_puppet_conf( host, configuration_options, testdir )
puppetconf_main = host.puppet('master')['config']
puppetconf_filename = File.basename(puppetconf_main)
puppetconf_test = File.join(testdir, puppetconf_filename)
new_conf = puppet_conf_for( host, configuration_options )
create_remote_file host, puppetconf_test, new_conf.to_s
host.exec(
Command.new( "cat #{puppetconf_test} > #{puppetconf_main}" ),
:silent => true
)
host.exec( Command.new( "cat #{puppetconf_main}" ) )
end
# @!visibility private
def puppet_conf_for host, conf_opts
puppetconf = host.exec( Command.new( "cat #{host.puppet('master')['config']}" ) ).stdout
new_conf = IniFile.new( puppetconf ).merge( conf_opts )
new_conf
end
# @!visibility private
def bounce_service host, service, curl_retries = 120
if host.graceful_restarts?
apachectl_path = host.is_pe? ? "#{host['puppetsbindir']}/apache2ctl" : 'apache2ctl'
host.exec(Command.new("#{apachectl_path} graceful"))
else
host.exec puppet_resource('service', service, 'ensure=stopped')
host.exec puppet_resource('service', service, 'ensure=running')
end
curl_with_retries(" #{service} ", host, "https://localhost:8140", [35, 60], curl_retries)
end
# Runs 'puppet apply' on a remote host, piping manifest through stdin
#
# @param [Host] host The host that this command should be run on
#
# @param [String] manifest The puppet manifest to apply
#
# @!macro common_opts
# @option opts [Boolean] :parseonly (false) If this key is true, the
# "--parseonly" command line parameter will
# be passed to the 'puppet apply' command.
#
# @option opts [Boolean] :trace (false) If this key exists in the Hash,
# the "--trace" command line parameter will be
# passed to the 'puppet apply' command.
#
# @option opts [Array<Integer>] :acceptable_exit_codes ([0]) The list of exit
# codes that will NOT raise an error when found upon
# command completion. If provided, these values will
# be combined with those used in :catch_failures and
# :expect_failures to create the full list of
# passing exit codes.
#
# @option opts [Hash] :environment Additional environment variables to be
# passed to the 'puppet apply' command
#
# @option opts [Boolean] :catch_failures (false) By default `puppet
# --apply` will exit with 0, which does not count
# as a test failure, even if there were errors or
# changes when applying the manifest. This option
# enables detailed exit codes and causes a test
# failure if `puppet --apply` indicates there was
# a failure during its execution.
#
# @option opts [Boolean] :catch_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# changes or failures during its execution.
#
# @option opts [Boolean] :expect_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# no resource changes during its execution.
#
# @option opts [Boolean] :expect_failures (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates there were no
# failures during its execution.
#
# @option opts [Boolean] :future_parser (false) This option enables
# the future parser option that is available
# from Puppet version 3.2
# By default it will use the 'current' parser.
#
# @option opts [Boolean] :noop (false) If this option exists, the
# the "--noop" command line parameter will be
# passed to the 'puppet apply' command.
#
# @option opts [String] :modulepath The search path for modules, as
# a list of directories separated by the system
# path separator character. (The POSIX path separator
# is ‘:’, and the Windows path separator is ‘;’.)
#
# @option opts [String] :debug (false) If this option exists,
# the "--debug" command line parameter
# will be passed to the 'puppet apply' command.
#
# @param [Block] block This method will yield to a block of code passed
# by the caller; this can be used for additional
# validation, etc.
#
# @return [Array<Result>, Result, nil] An array of results, a result object,
# or nil. Check {#run_block_on} for more details on this.
def apply_manifest_on(host, manifest, opts = {}, &block)
block_on host do | host |
on_options = {}
on_options[:acceptable_exit_codes] = Array(opts[:acceptable_exit_codes])
puppet_apply_opts = {}
if opts[:debug]
puppet_apply_opts[:debug] = nil
else
puppet_apply_opts[:verbose] = nil
end
puppet_apply_opts[:parseonly] = nil if opts[:parseonly]
puppet_apply_opts[:trace] = nil if opts[:trace]
puppet_apply_opts[:parser] = 'future' if opts[:future_parser]
puppet_apply_opts[:modulepath] = opts[:modulepath] if opts[:modulepath]
puppet_apply_opts[:noop] = nil if opts[:noop]
# From puppet help:
# "... an exit code of '2' means there were changes, an exit code of
# '4' means there were failures during the transaction, and an exit
# code of '6' means there were both changes and failures."
if [opts[:catch_changes],opts[:catch_failures],opts[:expect_failures],opts[:expect_changes]].compact.length > 1
raise(ArgumentError,
'Cannot specify more than one of `catch_failures`, ' +
'`catch_changes`, `expect_failures`, or `expect_changes` ' +
'for a single manifest')
end
if opts[:catch_changes]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after idempotency so allow exit code 0 only.
on_options[:acceptable_exit_codes] |= [0]
elsif opts[:catch_failures]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after only complete success so allow exit codes 0 and 2 only.
on_options[:acceptable_exit_codes] |= [0, 2]
elsif opts[:expect_failures]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after failures specifically so allow exit codes 1, 4, and 6 only.
on_options[:acceptable_exit_codes] |= [1, 4, 6]
elsif opts[:expect_changes]
puppet_apply_opts['detailed-exitcodes'] = nil
# We're after changes specifically so allow exit code 2 only.
on_options[:acceptable_exit_codes] |= [2]
else
# Either use the provided acceptable_exit_codes or default to [0]
on_options[:acceptable_exit_codes] |= [0]
end
# Not really thrilled with this implementation, might want to improve it
# later. Basically, there is a magic trick in the constructor of
# PuppetCommand which allows you to pass in a Hash for the last value in
# the *args Array; if you do so, it will be treated specially. So, here
# we check to see if our caller passed us a hash of environment variables
# that they want to set for the puppet command. If so, we set the final
# value of *args to a new hash with just one entry (the value of which
# is our environment variables hash)
if opts.has_key?(:environment)
puppet_apply_opts['ENV'] = opts[:environment]
end
file_path = host.tmpfile('apply_manifest.pp')
create_remote_file(host, file_path, manifest + "\n")
if host[:default_apply_opts].respond_to? :merge
puppet_apply_opts = host[:default_apply_opts].merge( puppet_apply_opts )
end
on host, puppet('apply', file_path, puppet_apply_opts), on_options, &block
end
end
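# Example usage (illustrative values only):
#
#   apply_manifest_on(agents, 'notify { "hello": }',
#                     :catch_failures => true,
#                     :environment => { 'FACTER_role' => 'test' })
#
# :catch_failures enables detailed exit codes and accepts only 0 and 2, so the
# test fails if the apply reported any resource failures.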
# Runs 'puppet apply' on default host, piping manifest through stdin
# @see #apply_manifest_on
def apply_manifest(manifest, opts = {}, &block)
apply_manifest_on(default, manifest, opts, &block)
end
# @deprecated
def run_agent_on(host, arg='--no-daemonize --verbose --onetime --test',
options={}, &block)
block_on host do | host |
on host, puppet_agent(arg), options, &block
end
end
# This method, using the puppet resource 'host', will set up host aliases
# and register the removal of those host aliases via Beaker::TestCase#teardown
#
# A teardown step is also added to make sure the host stubbing is always
# removed.
#
# @param [Host, Array<Host>, String, Symbol] machine One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param ip_spec [Hash{String=>String}] a hash containing the host to ip
# mappings
# @example Stub puppetlabs.com on the master to 127.0.0.1
# stub_hosts_on(master, 'puppetlabs.com' => '127.0.0.1')
def stub_hosts_on(machine, ip_spec)
block_on machine do | host |
ip_spec.each do |address, ip|
logger.notify("Stubbing address #{address} to IP #{ip} on machine #{host}")
on( host, puppet('resource', 'host', address, 'ensure=present', "ip=#{ip}") )
end
teardown do
ip_spec.each do |address, ip|
logger.notify("Unstubbing address #{address} to IP #{ip} on machine #{host}")
on( host, puppet('resource', 'host', address, 'ensure=absent') )
end
end
end
end
# This method accepts a block and, using the puppet resource 'host', will
# set up host aliases before that block runs and remove them afterwards.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param ip_spec [Hash{String=>String}] a hash containing the host to ip
# mappings
# @example Stub puppetlabs.com on the master to 127.0.0.1
# with_host_stubbed_on(master, 'forgeapi.puppetlabs.com' => '127.0.0.1') do
# puppet( "module install puppetlabs-stdlib" )
# end
def with_host_stubbed_on(host, ip_spec, &block)
begin
block_on host do |host|
ip_spec.each_pair do |address, ip|
logger.notify("Stubbing address #{address} to IP #{ip} on machine #{host}")
on( host, puppet('resource', 'host', address, 'ensure=present', "ip=#{ip}") )
end
end
block.call
ensure
ip_spec.each do |address, ip|
logger.notify("Unstubbing address #{address} to IP #{ip} on machine #{host}")
on( host, puppet('resource', 'host', address, 'ensure=absent') )
end
end
end
# This method, using the puppet resource 'host', will set up host aliases
# on the default host and register their removal via a teardown step
#
# @example Stub puppetlabs.com on the default host to 127.0.0.1
# stub_hosts('puppetlabs.com' => '127.0.0.1')
# @see #stub_hosts_on
def stub_hosts(ip_spec)
stub_hosts_on(default, ip_spec)
end
# This wraps the method `stub_hosts_on` and makes the stub specific to
# the forge alias.
#
# forge api v1 canonical source is forge.puppetlabs.com
# forge api v3 canonical source is forgeapi.puppetlabs.com
#
# @param machine [String] the host to perform the stub on
# @param forge_host [String] The URL to use as the forge alias, will default to using :forge_host in the
# global options hash
def stub_forge_on(machine, forge_host = nil)
#use global options hash
forge_host ||= options[:forge_host]
@forge_ip ||= Resolv.getaddress(forge_host)
block_on machine do | host |
stub_hosts_on(host, 'forge.puppetlabs.com' => @forge_ip)
stub_hosts_on(host, 'forgeapi.puppetlabs.com' => @forge_ip)
end
end
# This wraps the method `with_host_stubbed_on` and makes the stub specific to
# the forge alias.
#
# forge api v1 canonical source is forge.puppetlabs.com
# forge api v3 canonical source is forgeapi.puppetlabs.com
#
# @param host [String] the host to perform the stub on
# @param forge_host [String] The URL to use as the forge alias, will default to using :forge_host in the
# global options hash
def with_forge_stubbed_on( host, forge_host = nil, &block )
#use global options hash
forge_host ||= options[:forge_host]
@forge_ip ||= Resolv.getaddress(forge_host)
with_host_stubbed_on( host,
{'forge.puppetlabs.com' => @forge_ip,
'forgeapi.puppetlabs.com' => @forge_ip},
&block )
end
# This wraps `with_forge_stubbed_on` and provides it the default host
# @see with_forge_stubbed_on
def with_forge_stubbed( forge_host = nil, &block )
with_forge_stubbed_on( default, forge_host, &block )
end
# This wraps the method `stub_hosts` and makes the stub specific to
# the forge alias.
#
# @see #stub_forge_on
def stub_forge(forge_host = nil)
#use global options hash
forge_host ||= options[:forge_host]
stub_forge_on(default, forge_host)
end
def sleep_until_puppetdb_started(host)
curl_with_retries("start puppetdb", host, "http://localhost:8080", 0, 120)
curl_with_retries("start puppetdb (ssl)",
host, "https://#{host.node_name}:8081", [35, 60])
end
def sleep_until_puppetserver_started(host)
curl_with_retries("start puppetserver (ssl)",
host, "https://#{host.node_name}:8140", [35, 60])
end
def sleep_until_nc_started(host)
curl_with_retries("start nodeclassifier (ssl)",
host, "https://#{host.node_name}:4433", [35, 60])
end
#stops the puppet agent running on the host
# @param [Host, Array<Host>, String, Symbol] agent One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
def stop_agent_on(agent)
block_on agent do | host |
vardir = agent.puppet['vardir']
agent_running = true
while agent_running
agent_running = agent.file_exist?("#{vardir}/state/agent_catalog_run.lock")
if agent_running
sleep 2
end
end
# In 4.0 this was changed to just be `puppet`
agent_service = 'puppet'
if !aio_version?(agent)
# The agent service is `pe-puppet` everywhere EXCEPT certain linux distros on PE 2.8
# In all the case that it is different, this init script will exist. So we can assume
# that if the script doesn't exist, we should just use `pe-puppet`
agent_service = 'pe-puppet-agent'
agent_service = 'pe-puppet' unless agent.file_exist?('/etc/init.d/pe-puppet-agent')
end
# Under a number of stupid circumstances, we can't stop the
# agent using puppet. This is usually because of issues with
# the init script or system on that particular configuration.
avoid_puppet_at_all_costs = false
avoid_puppet_at_all_costs ||= agent['platform'] =~ /el-4/
avoid_puppet_at_all_costs ||= agent['pe_ver'] && version_is_less(agent['pe_ver'], '3.2') && agent['platform'] =~ /sles/
if avoid_puppet_at_all_costs
# When upgrading, puppet is already stopped. On EL4, this causes an exit code of '1'
on agent, "/etc/init.d/#{agent_service} stop", :acceptable_exit_codes => [0, 1]
else
on agent, puppet_resource('service', agent_service, 'ensure=stopped')
end
end
end
#stops the puppet agent running on the default host
# @see #stop_agent_on
def stop_agent
stop_agent_on(default)
end
#wait for a given host to appear in the dashboard
def wait_for_host_in_dashboard(host)
hostname = host.node_name
if host['platform'] =~ /aix/ then
curl_opts = '--tlsv1 -I'
else
curl_opts = '--tlsv1 -k -I'
end
retry_on(dashboard, "! curl #{curl_opts} https://#{dashboard}/nodes/#{hostname} | grep '404 Not Found'")
end
# Ensure the host has requested a cert, then sign it
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
#
# @return nil
# @raise [FailTest] if process times out
def sign_certificate_for(host)
block_on host do | host |
if [master, dashboard, database].include? host
on host, puppet( 'agent -t' ), :acceptable_exit_codes => [0,1,2]
on master, puppet( "cert --allow-dns-alt-names sign #{host}" ), :acceptable_exit_codes => [0,24]
else
hostname = Regexp.escape host.node_name
last_sleep = 0
next_sleep = 1
(0..10).each do |i|
fail_test("Failed to sign cert for #{hostname}") if i == 10
on master, puppet("cert --sign --all --allow-dns-alt-names"), :acceptable_exit_codes => [0,24]
break if on(master, puppet("cert --list --all")).stdout =~ /\+ "?#{hostname}"?/
sleep next_sleep
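# Fibonacci-style backoff: successive waits are 1, 1, 2, 3, 5, ... seconds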
(last_sleep, next_sleep) = next_sleep, last_sleep+next_sleep
end
end
end
end
#prompt the master to sign certs then check to confirm the cert for the default host is signed
#@see #sign_certificate_for
def sign_certificate
sign_certificate_for(default)
end
# Create a temp directory on remote host with a user. Default user
# is puppet master user.
#
# @param [Host] host A single remote host on which to create and adjust
# the ownership of a temp directory.
# @param [String] name A remote path prefix for the new temp
# directory. Default value is '/tmp/beaker'
# @param [String] user The name of user that should own the temp
# directory. If no username is specified, use `puppet master
# --configprint user` to obtain username from master. Raise RuntimeError
# if this puppet command returns a non-zero exit code.
#
# @return [String] Returns the name of the newly-created dir.
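#
# @example Creating a tmpdir owned by the master's puppet user (illustrative path)
#   secret_dir = create_tmpdir_for_user(master, '/tmp/beaker-secrets')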
def create_tmpdir_for_user(host, name='/tmp/beaker', user=nil)
unless user
result = on host, puppet("master --configprint user")
if result.exit_code != 0
raise "`puppet master --configprint` failed, check that puppet is installed on #{host} or explicitly pass in a user name."
end
user = result.stdout.strip
end
create_tmpdir_on(host, name, user)
end
end
end
end
end
| 1 | 11,558 | perhaps these magic numbers could go into a Defaults module somewhere? 8140 implies the default is for puppet server. i guess that's somewhat reasonable. but if we're trying to decouple beaker from puppet, maybe this should be 80? | voxpupuli-beaker | rb |
@@ -7595,6 +7595,13 @@ dr_prepopulate_cache(app_pc *tags, size_t tags_count)
return true;
}
+DR_API
+uint64
+dr_stats_get_built_blocks_count() {
+ return GLOBAL_STAT(num_bbs);
+}
+
+
/***************************************************************************
* PERSISTENCE
*/ | 1 | /* ******************************************************************************
* Copyright (c) 2010-2018 Google, Inc. All rights reserved.
* Copyright (c) 2010-2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2002-2010 VMware, Inc. All rights reserved.
* ******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2002-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2002 Hewlett-Packard Company */
/*
* instrument.c - interface for instrumentation
*/
#include "../globals.h" /* just to disable warning C4206 about an empty file */
#include "instrument.h"
#include "arch.h"
#include "instr.h"
#include "instr_create.h"
#include "instrlist.h"
#include "decode.h"
#include "disassemble.h"
#include "../fragment.h"
#include "../fcache.h"
#include "../emit.h"
#include "../link.h"
#include "../monitor.h" /* for mark_trace_head */
#include <string.h> /* for strstr */
#include <stdarg.h> /* for varargs */
#include "../nudge.h" /* for nudge_internal() */
#include "../synch.h"
#include "../annotations.h"
#include "../translate.h"
#ifdef UNIX
# include <sys/time.h> /* ITIMER_* */
# include "../unix/module.h" /* redirect_* functions */
#endif
#ifdef CLIENT_INTERFACE
/* in utils.c, not exported to everyone */
extern ssize_t do_file_write(file_t f, const char *fmt, va_list ap);
#ifdef DEBUG
/* case 10450: give messages to clients */
/* we can't undef ASSERT b/c of DYNAMO_OPTION */
# undef ASSERT_TRUNCATE
# undef ASSERT_BITFIELD_TRUNCATE
# undef ASSERT_NOT_REACHED
# define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
#endif
/* PR 200065: User passes us the shared library, we look up "dr_init"
* or "dr_client_main" and call it. From there, the client can register which events it
* wishes to receive.
*/
#define INSTRUMENT_INIT_NAME_LEGACY "dr_init"
#define INSTRUMENT_INIT_NAME "dr_client_main"
/* PR 250952: version check
* If changing this, don't forget to update:
* - lib/dr_defines.h _USES_DR_VERSION_
* - api/docs/footer.html
*/
#define USES_DR_VERSION_NAME "_USES_DR_VERSION_"
/* Should we expose this for use in samples/tracedump.c?
* Also, if we change this, need to change the symlink generation
* in core/CMakeLists.txt: at that point should share single define.
*/
/* OLDEST_COMPATIBLE_VERSION now comes from configure.h */
/* The 3rd version number, the bugfix/patch number, should not affect
* compatibility, so our version check number simply uses:
* major*100 + minor
* Which gives us room for 100 minor versions per major.
*/
#define NEWEST_COMPATIBLE_VERSION CURRENT_API_VERSION
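/* For example, a 7.1 release checks as 7*100 + 1 = 701 (illustrative arithmetic only). */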
/* Store the unique not-part-of-version build number (the version
* BUILD_NUMBER is limited to 64K and is not guaranteed to be unique)
* somewhere accessible at a customer site. We could alternatively
* pull it out of our DYNAMORIO_DEFINES string.
*/
DR_API const char *unique_build_number = STRINGIFY(UNIQUE_BUILD_NUMBER);
/* Acquire when registering or unregistering event callbacks
* Also held when invoking events, which happens much more often
* than registration changes, so we use rwlock
*/
DECLARE_CXTSWPROT_VAR(static read_write_lock_t callback_registration_lock,
INIT_READWRITE_LOCK(callback_registration_lock));
/* Structures for maintaining lists of event callbacks */
typedef void (*callback_t)(void);
typedef struct _callback_list_t {
callback_t *callbacks; /* array of callback functions */
size_t num; /* number of callbacks registered */
size_t size; /* allocated space (may be larger than num) */
} callback_list_t;
/* This is a little convoluted. The following is a macro to iterate
* over a list of callbacks and call each function. We use a macro
* instead of a function so we can pass the function type and perform
* a typecast. We need to copy the callback list before iterating to
* support the possibility of one callback unregistering another and
* messing up the list while we're iterating. We'll optimize the case
* for 5 or fewer registered callbacks and stack-allocate the temp
* list. Otherwise, we'll heap-allocate the temp.
*
* We allow the args to use the var "idx" to access the client index.
*
* We consider the first registered callback to have the highest
* priority and call it last. If we gave the last registered callback
* the highest priority, a client could re-register a routine to
* increase its priority. That seems a little weird.
*/
/*
*/
#define FAST_COPY_SIZE 5
#define call_all_ret(ret, retop, postop, vec, type, ...) \
do { \
size_t idx, num; \
/* we will be called even if no callbacks (i.e., (vec).num == 0) */ \
/* we guarantee we're in DR state at all callbacks and clean calls */ \
/* XXX: add CLIENT_ASSERT here */ \
read_lock(&callback_registration_lock); \
num = (vec).num; \
if (num == 0) { \
read_unlock(&callback_registration_lock); \
} \
else if (num <= FAST_COPY_SIZE) { \
callback_t tmp[FAST_COPY_SIZE]; \
memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \
read_unlock(&callback_registration_lock); \
for (idx=0; idx<num; idx++) { \
ret retop (((type)tmp[num-idx-1])(__VA_ARGS__)) postop; \
} \
} \
else { \
callback_t *tmp = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, callback_t, \
num, ACCT_OTHER, UNPROTECTED); \
memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \
read_unlock(&callback_registration_lock); \
for (idx=0; idx<num; idx++) { \
ret retop (((type)tmp[num-idx-1])(__VA_ARGS__)) postop; \
} \
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, tmp, callback_t, num, \
ACCT_OTHER, UNPROTECTED); \
} \
} while (0)
/* It's less error-prone if we just have one call_all macro. We'll
* reuse call_all_ret above for callbacks that don't have a return
* value by assigning to a dummy var. Note that this means we'll
* have to pass an int-returning type to call_all()
*/
#define call_all(vec, type, ...) \
do { \
int dummy; \
call_all_ret(dummy, =, , vec, type, __VA_ARGS__); \
} while (0)
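/* Example invocation (as used later in this file): run every registered exit
 * callback. The macro requires at least one vararg, so a bogus NULL is passed:
 *
 *   call_all(exit_callbacks, int (*)(), NULL);
 */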
/* Lists of callbacks for each event type. Note that init and nudge
* callback lists are kept in the client_lib_t data structure below.
* We could store all lists on a per-client basis, but we can iterate
* over these lists slightly more efficiently if we store all
* callbacks for a specific event in a single list.
*/
static callback_list_t exit_callbacks = {0,};
static callback_list_t thread_init_callbacks = {0,};
static callback_list_t thread_exit_callbacks = {0,};
#ifdef UNIX
static callback_list_t fork_init_callbacks = {0,};
#endif
static callback_list_t bb_callbacks = {0,};
static callback_list_t trace_callbacks = {0,};
#ifdef CUSTOM_TRACES
static callback_list_t end_trace_callbacks = {0,};
#endif
static callback_list_t fragdel_callbacks = {0,};
static callback_list_t restore_state_callbacks = {0,};
static callback_list_t restore_state_ex_callbacks = {0,};
static callback_list_t module_load_callbacks = {0,};
static callback_list_t module_unload_callbacks = {0,};
static callback_list_t filter_syscall_callbacks = {0,};
static callback_list_t pre_syscall_callbacks = {0,};
static callback_list_t post_syscall_callbacks = {0,};
static callback_list_t kernel_xfer_callbacks = {0,};
#ifdef WINDOWS
static callback_list_t exception_callbacks = {0,};
#else
static callback_list_t signal_callbacks = {0,};
#endif
#ifdef PROGRAM_SHEPHERDING
static callback_list_t security_violation_callbacks = {0,};
#endif
static callback_list_t persist_ro_size_callbacks = {0,};
static callback_list_t persist_ro_callbacks = {0,};
static callback_list_t resurrect_ro_callbacks = {0,};
static callback_list_t persist_rx_size_callbacks = {0,};
static callback_list_t persist_rx_callbacks = {0,};
static callback_list_t resurrect_rx_callbacks = {0,};
static callback_list_t persist_rw_size_callbacks = {0,};
static callback_list_t persist_rw_callbacks = {0,};
static callback_list_t resurrect_rw_callbacks = {0,};
static callback_list_t persist_patch_callbacks = {0,};
/* An array of client libraries. We use a static array instead of a
* heap-allocated list so we can load the client libs before
* initializing DR's heap.
*/
typedef struct _client_lib_t {
client_id_t id;
char path[MAXIMUM_PATH];
/* PR 366195: dlopen() handle truly is opaque: != start */
shlib_handle_t lib;
app_pc start;
app_pc end;
/* The raw option string, which after i#1736 contains token-delimiting quotes */
char options[MAX_OPTION_LENGTH];
/* The option string with token-delimiting quotes removed for backward compat */
char legacy_options[MAX_OPTION_LENGTH];
/* The parsed options: */
int argc;
const char **argv;
/* We need to associate nudge events with a specific client so we
* store that list here in the client_lib_t instead of using a
* single global list.
*/
callback_list_t nudge_callbacks;
} client_lib_t;
/* these should only be modified prior to instrument_init(), since no
* readers of the client_libs array (event handlers, etc.) use synch
*/
static client_lib_t client_libs[MAX_CLIENT_LIBS] = {{0,}};
static size_t num_client_libs = 0;
static void *persist_user_data[MAX_CLIENT_LIBS];
#ifdef WINDOWS
/* private kernel32 lib, used to print to console */
static bool print_to_console;
static shlib_handle_t priv_kernel32;
typedef BOOL (WINAPI *kernel32_WriteFile_t)
(HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED);
static kernel32_WriteFile_t kernel32_WriteFile;
static ssize_t dr_write_to_console_varg(bool to_stdout, const char *fmt, ...);
#endif
bool client_requested_exit;
#ifdef WINDOWS
/* used for nudge support */
static bool block_client_nudge_threads = false;
DECLARE_CXTSWPROT_VAR(static int num_client_nudge_threads, 0);
#endif
#ifdef CLIENT_SIDELINE
/* # of sideline threads */
DECLARE_CXTSWPROT_VAR(static int num_client_sideline_threads, 0);
#endif
#if defined(WINDOWS) || defined(CLIENT_SIDELINE)
/* protects block_client_nudge_threads and incrementing num_client_nudge_threads */
DECLARE_CXTSWPROT_VAR(static mutex_t client_thread_count_lock,
INIT_LOCK_FREE(client_thread_count_lock));
#endif
static vm_area_vector_t *client_aux_libs;
#ifdef WINDOWS
DECLARE_CXTSWPROT_VAR(static mutex_t client_aux_lib64_lock,
INIT_LOCK_FREE(client_aux_lib64_lock));
#endif
/****************************************************************************/
/* INTERNAL ROUTINES */
static bool
char_is_quote(char c)
{
return c == '"' || c == '\'' || c == '`';
}
static void
parse_option_array(client_id_t client_id, const char *opstr,
int *argc OUT, const char ***argv OUT,
size_t max_token_size)
{
const char **a;
int cnt;
const char *s;
char *token = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, char, max_token_size,
ACCT_CLIENT, UNPROTECTED);
for (cnt = 0, s = dr_get_token(opstr, token, max_token_size);
s != NULL;
s = dr_get_token(s, token, max_token_size)) {
cnt++;
}
cnt++; /* add 1 so 0 can be "app" */
a = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, const char *, cnt, ACCT_CLIENT, UNPROTECTED);
cnt = 0;
a[cnt] = dr_strdup(dr_get_client_path(client_id) HEAPACCT(ACCT_CLIENT));
cnt++;
for (s = dr_get_token(opstr, token, max_token_size);
s != NULL;
s = dr_get_token(s, token, max_token_size)) {
a[cnt] = dr_strdup(token HEAPACCT(ACCT_CLIENT));
cnt++;
}
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, token, char, max_token_size,
ACCT_CLIENT, UNPROTECTED);
*argc = cnt;
*argv = a;
}
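/* Illustrative result (hypothetical option string): for a client registered with
 * the options
 *   -bar "baz qux"
 * parse_option_array() yields argc == 3 with
 *   argv[0] = <client library path>, argv[1] = "-bar", argv[2] = "baz qux",
 * mirroring a conventional argv so clients can reuse standard option parsers.
 */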
static bool
free_option_array(int argc, const char **argv)
{
int i;
for (i = 0; i < argc; i++) {
dr_strfree(argv[i] HEAPACCT(ACCT_CLIENT));
}
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, argv, char *, argc, ACCT_CLIENT, UNPROTECTED);
return true;
}
static void
add_callback(callback_list_t *vec, void (*func)(void), bool unprotect)
{
if (func == NULL) {
CLIENT_ASSERT(false, "trying to register a NULL callback");
return;
}
if (standalone_library) {
CLIENT_ASSERT(false, "events not supported in standalone library mode");
return;
}
write_lock(&callback_registration_lock);
/* Although we're receiving a pointer to a callback_list_t, we're
* usually modifying a static var.
*/
if (unprotect) {
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
}
/* We may already have an open slot since we allocate in twos and
* because we don't bother to free the storage when we remove the
* callback. Check and only allocate if necessary.
*/
if (vec->num == vec->size) {
callback_t *tmp = HEAP_ARRAY_ALLOC
(GLOBAL_DCONTEXT, callback_t, vec->size + 2, /* Let's allocate 2 */
ACCT_OTHER, UNPROTECTED);
if (tmp == NULL) {
CLIENT_ASSERT(false, "out of memory: can't register callback");
write_unlock(&callback_registration_lock);
return;
}
if (vec->callbacks != NULL) {
memcpy(tmp, vec->callbacks, vec->num * sizeof(callback_t));
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec->callbacks, callback_t, vec->size,
ACCT_OTHER, UNPROTECTED);
}
vec->callbacks = tmp;
vec->size += 2;
}
vec->callbacks[vec->num] = func;
vec->num++;
if (unprotect) {
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
write_unlock(&callback_registration_lock);
}
static bool
remove_callback(callback_list_t *vec, void (*func)(void), bool unprotect)
{
size_t i;
bool found = false;
if (func == NULL) {
CLIENT_ASSERT(false, "trying to unregister a NULL callback");
return false;
}
write_lock(&callback_registration_lock);
/* Although we're receiving a pointer to a callback_list_t, we're
* usually modifying a static var.
*/
if (unprotect) {
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
}
for (i=0; i<vec->num; i++) {
if (vec->callbacks[i] == func) {
size_t j;
/* shift down the entries on the tail */
for (j=i; j<vec->num-1; j++) {
vec->callbacks[j] = vec->callbacks[j+1];
}
vec->num -= 1;
found = true;
break;
}
}
if (unprotect) {
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
write_unlock(&callback_registration_lock);
return found;
}
/* This should only be called prior to instrument_init(),
* since no readers of the client_libs array use synch
* and since this routine assumes .data is writable.
*/
static void
add_client_lib(const char *path, const char *id_str, const char *options)
{
client_id_t id;
shlib_handle_t client_lib;
DEBUG_DECLARE(size_t i);
ASSERT(!dynamo_initialized);
/* if ID not specified, we'll default to 0 */
id = (id_str == NULL) ? 0 : strtoul(id_str, NULL, 16);
#ifdef DEBUG
/* Check for conflicting IDs */
for (i=0; i<num_client_libs; i++) {
CLIENT_ASSERT(client_libs[i].id != id, "Clients have the same ID");
}
#endif
if (num_client_libs == MAX_CLIENT_LIBS) {
CLIENT_ASSERT(false, "Max number of clients reached");
return;
}
LOG(GLOBAL, LOG_INTERP, 4, "about to load client library %s\n", path);
client_lib = load_shared_library(path, IF_X64_ELSE(DYNAMO_OPTION(reachable_client),
true));
if (client_lib == NULL) {
char msg[MAXIMUM_PATH*4];
char err[MAXIMUM_PATH*2];
shared_library_error(err, BUFFER_SIZE_ELEMENTS(err));
snprintf(msg, BUFFER_SIZE_ELEMENTS(msg),
".\n\tError opening instrumentation library %s:\n\t%s",
path, err);
NULL_TERMINATE_BUFFER(msg);
/* PR 232490 - malformed library names or incorrect
* permissions shouldn't blow up an app in release builds as
* they may happen at customer sites with a third party
* client.
*/
/* PR 408318: 32-vs-64 errors should NOT be fatal to continue
* in debug build across execve chains. Xref i#147.
* XXX: w/ -private_loader, err always equals "error in private loader"
* and so we never match here!
*/
IF_UNIX(if (strstr(err, "wrong ELF class") == NULL))
CLIENT_ASSERT(false, msg);
SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 4,
get_application_name(), get_application_pid(), path, msg);
}
else {
/* PR 250952: version check */
int *uses_dr_version = (int *)
lookup_library_routine(client_lib, USES_DR_VERSION_NAME);
if (uses_dr_version == NULL ||
*uses_dr_version < OLDEST_COMPATIBLE_VERSION ||
*uses_dr_version > NEWEST_COMPATIBLE_VERSION) {
/* not a fatal usage error since we want release build to continue */
CLIENT_ASSERT(false,
"client library is incompatible with this version of DR");
SYSLOG(SYSLOG_ERROR, CLIENT_VERSION_INCOMPATIBLE, 2,
get_application_name(), get_application_pid());
}
else {
size_t idx = num_client_libs++;
DEBUG_DECLARE(bool ok;)
client_libs[idx].id = id;
client_libs[idx].lib = client_lib;
DEBUG_DECLARE(ok =)
shared_library_bounds(client_lib, (byte *) uses_dr_version, NULL,
&client_libs[idx].start, &client_libs[idx].end);
ASSERT(ok);
LOG(GLOBAL, LOG_INTERP, 1, "loaded %s at "PFX"-"PFX"\n",
path, client_libs[idx].start, client_libs[idx].end);
#ifdef X64
/* Now that we map the client within the constraints, this request
* should always succeed.
*/
if (DYNAMO_OPTION(reachable_client)) {
request_region_be_heap_reachable(client_libs[idx].start,
client_libs[idx].end -
client_libs[idx].start);
}
#endif
strncpy(client_libs[idx].path, path,
BUFFER_SIZE_ELEMENTS(client_libs[idx].path));
NULL_TERMINATE_BUFFER(client_libs[idx].path);
if (options != NULL) {
strncpy(client_libs[idx].options, options,
BUFFER_SIZE_ELEMENTS(client_libs[idx].options));
NULL_TERMINATE_BUFFER(client_libs[idx].options);
}
/* We'll look up dr_client_main and call it in instrument_init */
}
}
}
void
instrument_load_client_libs(void)
{
if (CLIENTS_EXIST()) {
char buf[MAX_LIST_OPTION_LENGTH];
char *path;
string_option_read_lock();
strncpy(buf, INTERNAL_OPTION(client_lib), BUFFER_SIZE_ELEMENTS(buf));
string_option_read_unlock();
NULL_TERMINATE_BUFFER(buf);
/* We're expecting path;ID;options triples */
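/* e.g. (illustrative): a -client_lib value of
 *   /path/libclientA.so;0;-opt val;/path/libclientB.so;1;-v
 * loads client A with ID 0 and options "-opt val" and client B with ID 1 and
 * options "-v".
 */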
path = buf;
do {
char *id = NULL;
char *options = NULL;
char *next_path = NULL;
id = strstr(path, ";");
if (id != NULL) {
id[0] = '\0';
id++;
options = strstr(id, ";");
if (options != NULL) {
options[0] = '\0';
options++;
next_path = strstr(options, ";");
if (next_path != NULL) {
next_path[0] = '\0';
next_path++;
}
}
}
#ifdef STATIC_LIBRARY
/* We ignore client library paths and allow client code anywhere in the app.
* We have a check in load_shared_library() to avoid loading
* a 2nd copy of the app.
* We do support passing client ID and options via the first -client_lib.
*/
add_client_lib(get_application_name(), id == NULL ? "0" : id,
options == NULL ? "" : options);
break;
#endif
add_client_lib(path, id, options);
path = next_path;
} while (path != NULL);
}
}
static void
init_client_aux_libs(void)
{
if (client_aux_libs == NULL) {
VMVECTOR_ALLOC_VECTOR(client_aux_libs, GLOBAL_DCONTEXT,
VECTOR_SHARED, client_aux_libs);
}
}
void
instrument_init(void)
{
size_t i;
init_client_aux_libs();
if (num_client_libs > 0) {
/* We no longer distinguish in-DR vs in-client crashes, as many crashes in
* the DR lib are really client bugs.
* We expect most end-user tools to call dr_set_client_name() so we
* have generic defaults here:
*/
set_exception_strings("Tool", "your tool's issue tracker");
}
/* Iterate over the client libs and call each init routine */
for (i=0; i<num_client_libs; i++) {
void (*init)(client_id_t, int, const char **) =
(void (*)(client_id_t, int, const char **))
(lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME));
void (*legacy)(client_id_t) = (void (*)(client_id_t))
(lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME_LEGACY));
/* we can't do this in instrument_load_client_libs() b/c vmheap
* is not set up at that point
*/
all_memory_areas_lock();
update_all_memory_areas(client_libs[i].start, client_libs[i].end,
/* FIXME: need to walk the sections: but may be
* better to obfuscate from clients anyway.
* We can't set as MEMPROT_NONE as that leads to
* bugs if the app wants to interpret part of
* its code section (xref PR 504629).
*/
MEMPROT_READ, DR_MEMTYPE_IMAGE);
all_memory_areas_unlock();
/* i#1736: parse the options up front */
parse_option_array(client_libs[i].id, client_libs[i].options,
&client_libs[i].argc, &client_libs[i].argv,
MAX_OPTION_LENGTH);
#ifdef STATIC_LIBRARY
/* We support the app having client code anywhere, so there does not
* have to be an init routine that we call. This means the app
* may have to iterate modules on its own.
*/
#else
/* Since the user has to register all other events, it
* doesn't make sense to provide the -client_lib
* option for a module that doesn't export an init routine.
*/
CLIENT_ASSERT(init != NULL || legacy != NULL,
"client does not export a dr_client_main or dr_init routine");
#endif
if (init != NULL)
(*init)(client_libs[i].id, client_libs[i].argc, client_libs[i].argv);
else if (legacy != NULL)
(*legacy)(client_libs[i].id);
}
/* We now initialize the 1st thread before coming here, so we can
* hand the client a dcontext; so we need to specially generate
* the thread init event now. An alternative is to have
* dr_get_global_drcontext(), but that's extra complexity for no
* real reason.
* We raise the thread init event prior to the module load events
* so the client can access a dcontext in module load events (i#1339).
*/
if (thread_init_callbacks.num > 0) {
instrument_thread_init(get_thread_private_dcontext(), false, false);
}
/* If the client just registered the module-load event, let's
* assume it wants to be informed of *all* modules and tell it
* which modules are already loaded. If the client registers the
* event later, it will need to use the module iterator routines
* to retrieve currently loaded modules. We use the dr_module_iterator
* exposed to the client to avoid locking issues.
*/
if (module_load_callbacks.num > 0) {
dr_module_iterator_t *mi = dr_module_iterator_start();
while (dr_module_iterator_hasnext(mi)) {
module_data_t *data = dr_module_iterator_next(mi);
instrument_module_load(data, true /*already loaded*/);
/* XXX; more efficient to set this flag during dr_module_iterator_start */
os_module_set_flag(data->start, MODULE_LOAD_EVENT);
dr_free_module_data(data);
}
dr_module_iterator_stop(mi);
}
}
static void
free_callback_list(callback_list_t *vec)
{
if (vec->callbacks != NULL) {
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec->callbacks, callback_t, vec->size,
ACCT_OTHER, UNPROTECTED);
vec->callbacks = NULL;
}
vec->size = 0;
vec->num = 0;
}
static void
free_all_callback_lists()
{
free_callback_list(&exit_callbacks);
free_callback_list(&thread_init_callbacks);
free_callback_list(&thread_exit_callbacks);
#ifdef UNIX
free_callback_list(&fork_init_callbacks);
#endif
free_callback_list(&bb_callbacks);
free_callback_list(&trace_callbacks);
#ifdef CUSTOM_TRACES
free_callback_list(&end_trace_callbacks);
#endif
free_callback_list(&fragdel_callbacks);
free_callback_list(&restore_state_callbacks);
free_callback_list(&restore_state_ex_callbacks);
free_callback_list(&module_load_callbacks);
free_callback_list(&module_unload_callbacks);
free_callback_list(&filter_syscall_callbacks);
free_callback_list(&pre_syscall_callbacks);
free_callback_list(&post_syscall_callbacks);
free_callback_list(&kernel_xfer_callbacks);
#ifdef WINDOWS
free_callback_list(&exception_callbacks);
#else
free_callback_list(&signal_callbacks);
#endif
#ifdef PROGRAM_SHEPHERDING
free_callback_list(&security_violation_callbacks);
#endif
free_callback_list(&persist_ro_size_callbacks);
free_callback_list(&persist_ro_callbacks);
free_callback_list(&resurrect_ro_callbacks);
free_callback_list(&persist_rx_size_callbacks);
free_callback_list(&persist_rx_callbacks);
free_callback_list(&resurrect_rx_callbacks);
free_callback_list(&persist_rw_size_callbacks);
free_callback_list(&persist_rw_callbacks);
free_callback_list(&resurrect_rw_callbacks);
free_callback_list(&persist_patch_callbacks);
}
void
instrument_exit_post_sideline(void)
{
#if defined(WINDOWS) || defined(CLIENT_SIDELINE)
DELETE_LOCK(client_thread_count_lock);
#endif
}
void
instrument_exit(void)
{
/* Note - currently own initexit lock when this is called (see PR 227619). */
/* support dr_get_mcontext() from the exit event */
if (!standalone_library)
get_thread_private_dcontext()->client_data->mcontext_in_dcontext = true;
call_all(exit_callbacks, int (*)(),
/* It seems the compiler is confused if we pass no var args
* to the call_all macro. Bogus NULL arg */
NULL);
if (IF_DEBUG_ELSE(true, doing_detach)) {
/* Unload all client libs and free any allocated storage */
size_t i;
for (i=0; i<num_client_libs; i++) {
free_callback_list(&client_libs[i].nudge_callbacks);
unload_shared_library(client_libs[i].lib);
if (client_libs[i].argv != NULL)
free_option_array(client_libs[i].argc, client_libs[i].argv);
}
free_all_callback_lists();
}
vmvector_delete_vector(GLOBAL_DCONTEXT, client_aux_libs);
client_aux_libs = NULL;
num_client_libs = 0;
#ifdef WINDOWS
DELETE_LOCK(client_aux_lib64_lock);
#endif
DELETE_READWRITE_LOCK(callback_registration_lock);
}
bool
is_in_client_lib(app_pc addr)
{
/* NOTE: we use this routine for detecting exceptions in
* clients. If we add a callback on that event we'll have to be
* sure to deliver it only to the right client.
*/
size_t i;
for (i=0; i<num_client_libs; i++) {
if ((addr >= (app_pc)client_libs[i].start) &&
(addr < client_libs[i].end)) {
return true;
}
}
if (client_aux_libs != NULL &&
vmvector_overlap(client_aux_libs, addr, addr+1))
return true;
return false;
}
bool
get_client_bounds(client_id_t client_id,
app_pc *start/*OUT*/, app_pc *end/*OUT*/)
{
if (client_id >= num_client_libs)
return false;
if (start != NULL)
*start = (app_pc) client_libs[client_id].start;
if (end != NULL)
*end = (app_pc) client_libs[client_id].end;
return true;
}
const char *
get_client_path_from_addr(app_pc addr)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if ((addr >= (app_pc)client_libs[i].start) &&
(addr < client_libs[i].end)) {
return client_libs[i].path;
}
}
return "";
}
bool
is_valid_client_id(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return true;
}
}
return false;
}
void
dr_register_exit_event(void (*func)(void))
{
add_callback(&exit_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_exit_event(void (*func)(void))
{
return remove_callback(&exit_callbacks, (void (*)(void))func, true);
}
void
dr_register_bb_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *bb,
bool for_trace, bool translating))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for bb event when code_api is disabled");
return;
}
add_callback(&bb_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_bb_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *bb,
bool for_trace, bool translating))
{
return remove_callback(&bb_callbacks, (void (*)(void))func, true);
}
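/* Minimal client-side sketch (illustrative; such code lives in a client library,
 * not in this file):
 *
 *   static dr_emit_flags_t
 *   event_bb(void *drcontext, void *tag, instrlist_t *bb, bool for_trace,
 *            bool translating)
 *   {
 *       return DR_EMIT_DEFAULT;
 *   }
 *
 *   DR_EXPORT void
 *   dr_client_main(client_id_t id, int argc, const char *argv[])
 *   {
 *       dr_register_bb_event(event_bb);
 *   }
 */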
void
dr_register_trace_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *trace,
bool translating))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for trace event when code_api is disabled");
return;
}
add_callback(&trace_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_trace_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *trace,
bool translating))
{
return remove_callback(&trace_callbacks, (void (*)(void))func, true);
}
#ifdef CUSTOM_TRACES
void
dr_register_end_trace_event(dr_custom_trace_action_t (*func)
(void *drcontext, void *tag, void *next_tag))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for end-trace event when code_api is disabled");
return;
}
add_callback(&end_trace_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_end_trace_event(dr_custom_trace_action_t
(*func)(void *drcontext, void *tag, void *next_tag))
{
return remove_callback(&end_trace_callbacks, (void (*)(void))func, true);
}
#endif
void
dr_register_delete_event(void (*func)(void *drcontext, void *tag))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for delete event when code_api is disabled");
return;
}
add_callback(&fragdel_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_delete_event(void (*func)(void *drcontext, void *tag))
{
return remove_callback(&fragdel_callbacks, (void (*)(void))func, true);
}
void
dr_register_restore_state_event(void (*func)
(void *drcontext, void *tag, dr_mcontext_t *mcontext,
bool restore_memory, bool app_code_consistent))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for restore state event when code_api is disabled");
return;
}
add_callback(&restore_state_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_restore_state_event(void (*func)
(void *drcontext, void *tag, dr_mcontext_t *mcontext,
bool restore_memory, bool app_code_consistent))
{
return remove_callback(&restore_state_callbacks, (void (*)(void))func, true);
}
void
dr_register_restore_state_ex_event(bool (*func) (void *drcontext, bool restore_memory,
dr_restore_state_info_t *info))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for restore_state_ex event when code_api disabled");
return;
}
add_callback(&restore_state_ex_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_restore_state_ex_event(bool (*func) (void *drcontext, bool restore_memory,
dr_restore_state_info_t *info))
{
return remove_callback(&restore_state_ex_callbacks, (void (*)(void))func, true);
}
void
dr_register_thread_init_event(void (*func)(void *drcontext))
{
add_callback(&thread_init_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_thread_init_event(void (*func)(void *drcontext))
{
return remove_callback(&thread_init_callbacks, (void (*)(void))func, true);
}
void
dr_register_thread_exit_event(void (*func)(void *drcontext))
{
add_callback(&thread_exit_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_thread_exit_event(void (*func)(void *drcontext))
{
return remove_callback(&thread_exit_callbacks, (void (*)(void))func, true);
}
#ifdef UNIX
void
dr_register_fork_init_event(void (*func)(void *drcontext))
{
add_callback(&fork_init_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_fork_init_event(void (*func)(void *drcontext))
{
return remove_callback(&fork_init_callbacks, (void (*)(void))func, true);
}
#endif
void
dr_register_module_load_event(void (*func)(void *drcontext, const module_data_t *info,
bool loaded))
{
add_callback(&module_load_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_module_load_event(void (*func)(void *drcontext, const module_data_t *info,
bool loaded))
{
return remove_callback(&module_load_callbacks, (void (*)(void))func, true);
}
void
dr_register_module_unload_event(void (*func)(void *drcontext,
const module_data_t *info))
{
add_callback(&module_unload_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_module_unload_event(void (*func)(void *drcontext,
const module_data_t *info))
{
return remove_callback(&module_unload_callbacks, (void (*)(void))func, true);
}
#ifdef WINDOWS
void
dr_register_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt))
{
add_callback(&exception_callbacks, (bool (*)(void))func, true);
}
bool
dr_unregister_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt))
{
return remove_callback(&exception_callbacks, (bool (*)(void))func, true);
}
#else
void
dr_register_signal_event(dr_signal_action_t (*func)
(void *drcontext, dr_siginfo_t *siginfo))
{
add_callback(&signal_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_signal_event(dr_signal_action_t (*func)
(void *drcontext, dr_siginfo_t *siginfo))
{
return remove_callback(&signal_callbacks, (void (*)(void))func, true);
}
#endif /* WINDOWS */
void
dr_register_filter_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
add_callback(&filter_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_filter_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
return remove_callback(&filter_syscall_callbacks, (void (*)(void))func, true);
}
void
dr_register_pre_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
add_callback(&pre_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_pre_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
return remove_callback(&pre_syscall_callbacks, (void (*)(void))func, true);
}
void
dr_register_post_syscall_event(void (*func)(void *drcontext, int sysnum))
{
add_callback(&post_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_post_syscall_event(void (*func)(void *drcontext, int sysnum))
{
return remove_callback(&post_syscall_callbacks, (void (*)(void))func, true);
}
void
dr_register_kernel_xfer_event(void (*func)(void *drcontext,
const dr_kernel_xfer_info_t *info))
{
add_callback(&kernel_xfer_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_kernel_xfer_event(void (*func)(void *drcontext,
const dr_kernel_xfer_info_t *info))
{
return remove_callback(&kernel_xfer_callbacks, (void (*)(void))func, true);
}
#ifdef PROGRAM_SHEPHERDING
void
dr_register_security_event(void (*func)(void *drcontext, void *source_tag,
app_pc source_pc, app_pc target_pc,
dr_security_violation_type_t violation,
dr_mcontext_t *mcontext,
dr_security_violation_action_t *action))
{
add_callback(&security_violation_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_security_event(void (*func)(void *drcontext, void *source_tag,
app_pc source_pc, app_pc target_pc,
dr_security_violation_type_t violation,
dr_mcontext_t *mcontext,
dr_security_violation_action_t *action))
{
return remove_callback(&security_violation_callbacks, (void (*)(void))func, true);
}
#endif
void
dr_register_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
add_callback(&client_libs[i].nudge_callbacks, (void (*)(void))func,
/* the nudge callback list is stored on the heap, so
* we don't need to unprotect the .data section when
* we update the list */
false);
return;
}
}
CLIENT_ASSERT(false, "dr_register_nudge_event: invalid client ID");
}
bool
dr_unregister_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return remove_callback(&client_libs[i].nudge_callbacks, (void (*)(void))func,
/* the nudge callback list is stored on the heap, so
* we don't need to unprotect the .data section when
* we update the list */
false);
}
}
CLIENT_ASSERT(false, "dr_unregister_nudge_event: invalid client ID");
return false;
}
dr_config_status_t
dr_nudge_client_ex(process_id_t process_id, client_id_t client_id,
uint64 argument, uint timeout_ms)
{
if (process_id == get_process_id()) {
size_t i;
#ifdef WINDOWS
pre_second_thread();
#endif
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == client_id) {
if (client_libs[i].nudge_callbacks.num == 0) {
CLIENT_ASSERT(false, "dr_nudge_client: no nudge handler registered");
return false;
}
return nudge_internal(process_id, NUDGE_GENERIC(client), argument,
client_id, timeout_ms);
}
}
return false;
} else {
return nudge_internal(process_id, NUDGE_GENERIC(client), argument,
client_id, timeout_ms);
}
}
bool
dr_nudge_client(client_id_t client_id, uint64 argument)
{
return dr_nudge_client_ex(get_process_id(), client_id, argument, 0) == DR_SUCCESS;
}
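/* Illustrative pairing (hypothetical client code): register a nudge handler and
 * later nudge the current process from the same client:
 *
 *   dr_register_nudge_event(nudge_handler, my_client_id);
 *   ...
 *   dr_nudge_client(my_client_id, 0);
 *
 * where nudge_handler has the signature void (*)(void *drcontext, uint64 argument).
 */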
#ifdef WINDOWS
DR_API
bool
dr_is_nudge_thread(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "invalid parameter to dr_is_nudge_thread");
return dcontext->nudge_target != NULL;
}
#endif
void
instrument_client_thread_init(dcontext_t *dcontext, bool client_thread)
{
if (dcontext->client_data == NULL) {
dcontext->client_data = HEAP_TYPE_ALLOC(dcontext, client_data_t,
ACCT_OTHER, UNPROTECTED);
memset(dcontext->client_data, 0x0, sizeof(client_data_t));
#ifdef CLIENT_SIDELINE
ASSIGN_INIT_LOCK_FREE(dcontext->client_data->sideline_mutex, sideline_mutex);
#endif
CLIENT_ASSERT(dynamo_initialized || thread_init_callbacks.num == 0 ||
client_thread,
"1st call to instrument_thread_init should have no cbs");
}
#ifdef CLIENT_SIDELINE
if (client_thread) {
ATOMIC_INC(int, num_client_sideline_threads);
/* We don't call dynamo_thread_not_under_dynamo() b/c we want itimers. */
dcontext->thread_record->under_dynamo_control = false;
dcontext->client_data->is_client_thread = true;
dcontext->client_data->suspendable = true;
}
#endif /* CLIENT_SIDELINE */
}
void
instrument_thread_init(dcontext_t *dcontext, bool client_thread, bool valid_mc)
{
/* Note that we're called twice for the initial thread: once prior
* to instrument_init() (PR 216936) to set up the dcontext client
* field (at which point there should be no callbacks since client
* has not had a chance to register any) (now split out, but both
* routines are called prior to instrument_init()), and once after
* instrument_init() to call the client event.
*/
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
bool swap_peb = false;
#endif
if (client_thread) {
/* no init event */
return;
}
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
/* i#996: we might be in app's state.
* It is simpler to check and swap here than earlier on thread init paths.
*/
if (dr_using_app_state(dcontext)) {
swap_peb_pointer(dcontext, true/*to priv*/);
swap_peb = true;
}
#endif
/* i#117/PR 395156: support dr_get_mcontext() from the thread init event */
if (valid_mc)
dcontext->client_data->mcontext_in_dcontext = true;
call_all(thread_init_callbacks, int (*)(void *), (void *)dcontext);
if (valid_mc)
dcontext->client_data->mcontext_in_dcontext = false;
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
if (swap_peb)
swap_peb_pointer(dcontext, false/*to app*/);
#endif
}
#ifdef UNIX
void
instrument_fork_init(dcontext_t *dcontext)
{
call_all(fork_init_callbacks, int (*)(void *), (void *)dcontext);
}
#endif
/* PR 536058: split the exit event from thread cleanup, to provide a
* dcontext in the process exit event
*/
void
instrument_thread_exit_event(dcontext_t *dcontext)
{
#ifdef CLIENT_SIDELINE
if (IS_CLIENT_THREAD(dcontext)
/* if nudge thread calls dr_exit_process() it will be marked as a client
* thread: rule it out here so we properly clean it up
*/
IF_WINDOWS(&& dcontext->nudge_target == NULL)) {
ATOMIC_DEC(int, num_client_sideline_threads);
/* no exit event */
return;
}
#endif
/* i#1394: best-effort to try to avoid crashing thread exit events
* where thread init was never called.
*/
if (!dynamo_initialized)
return;
/* support dr_get_mcontext() from the exit event */
dcontext->client_data->mcontext_in_dcontext = true;
/* Note - currently own initexit lock when this is called (see PR 227619). */
call_all(thread_exit_callbacks, int (*)(void *), (void *)dcontext);
}
void
instrument_thread_exit(dcontext_t *dcontext)
{
#ifdef DEBUG
client_todo_list_t *todo;
client_flush_req_t *flush;
#endif
#ifdef DEBUG
/* PR 470957: avoid racy crashes by not freeing in release build */
# ifdef CLIENT_SIDELINE
DELETE_LOCK(dcontext->client_data->sideline_mutex);
# endif
/* could be heap space allocated for the todo list */
todo = dcontext->client_data->to_do;
while (todo != NULL) {
client_todo_list_t *next_todo = todo->next;
if (todo->ilist != NULL) {
instrlist_clear_and_destroy(dcontext, todo->ilist);
}
HEAP_TYPE_FREE(dcontext, todo, client_todo_list_t, ACCT_CLIENT, UNPROTECTED);
todo = next_todo;
}
/* could be heap space allocated for the flush list */
flush = dcontext->client_data->flush_list;
while (flush != NULL) {
client_flush_req_t *next_flush = flush->next;
HEAP_TYPE_FREE(dcontext, flush, client_flush_req_t, ACCT_CLIENT, UNPROTECTED);
flush = next_flush;
}
HEAP_TYPE_FREE(dcontext, dcontext->client_data, client_data_t,
ACCT_OTHER, UNPROTECTED);
dcontext->client_data = NULL; /* for mutex_wait_contended_lock() */
dcontext->is_client_thread_exiting = true; /* for is_using_app_peb() */
#endif /* DEBUG */
}
bool
dr_bb_hook_exists(void)
{
return (bb_callbacks.num > 0);
}
bool
dr_trace_hook_exists(void)
{
return (trace_callbacks.num > 0);
}
bool
dr_fragment_deleted_hook_exists(void)
{
return (fragdel_callbacks.num > 0);
}
bool
dr_end_trace_hook_exists(void)
{
return (end_trace_callbacks.num > 0);
}
bool
dr_thread_exit_hook_exists(void)
{
return (thread_exit_callbacks.num > 0);
}
bool
dr_exit_hook_exists(void)
{
return (exit_callbacks.num > 0);
}
bool
dr_xl8_hook_exists(void)
{
return (restore_state_callbacks.num > 0 ||
restore_state_ex_callbacks.num > 0);
}
#endif /* CLIENT_INTERFACE */
/* needed outside of CLIENT_INTERFACE for simpler USE_BB_BUILDING_LOCK_STEADY_STATE() */
bool
dr_modload_hook_exists(void)
{
/* We do not support (as documented in the module event doxygen)
* the client changing this during bb building, as that will mess
* up USE_BB_BUILDING_LOCK_STEADY_STATE().
*/
return IF_CLIENT_INTERFACE_ELSE(module_load_callbacks.num > 0, false);
}
#ifdef CLIENT_INTERFACE
bool
hide_tag_from_client(app_pc tag)
{
#ifdef WINDOWS
/* Case 10009: Basic blocks that consist of a single jump into the
* interception buffer should be obscured from clients. Clients
* will see the displaced code, so we'll provide the address of this
* block if the client asks for the address of the displaced code.
*
* Note that we assume the jump is the first instruction in the
* BB for any blocks that jump to the interception buffer.
*/
if (is_intercepted_app_pc(tag, NULL) ||
/* Displaced app code is now in the landing pad, so skip the
* jump from the interception buffer to the landing pad
*/
is_in_interception_buffer(tag) ||
/* Landing pads that exist between hook points and the trampolines
* shouldn't be seen by the client too. PR 250294.
*/
is_on_interception_initial_route(tag) ||
/* PR 219351: if we lose control on a callback and get it back on
* one of our syscall trampolines, we'll appear at the jmp out of
* the interception buffer to the int/sysenter instruction. The
* problem is that our syscall trampolines, unlike our other
* intercepted code, are hooked earlier than the real action point
* and we have displaced app code at the start of the interception
* buffer: we hook at the wrapper entrance and return w/ a jmp to
* the sysenter/int instr. When creating bbs at the start we hack
* it to make it look like there is no hook. But on retaking control
* we end up w/ this jmp out that won't be solved w/ our normal
* mechanism for other hook jmp-outs: so we just suppress and the
* client next sees the post-syscall bb. It already saw a gap.
*/
is_syscall_trampoline(tag, NULL))
return true;
#endif
return false;
}
#ifdef DEBUG
/* PR 214962: client must set translation fields */
static void
check_ilist_translations(instrlist_t *ilist)
{
/* Ensure client set the translation field for all non-meta
* instrs, even if it didn't return DR_EMIT_STORE_TRANSLATIONS
* (since we may decide ourselves to store)
*/
instr_t *in;
for (in = instrlist_first(ilist); in != NULL; in = instr_get_next(in)) {
if (!instr_opcode_valid(in)) {
CLIENT_ASSERT(INTERNAL_OPTION(fast_client_decode), "level 0 instr found");
} else if (instr_is_app(in)) {
DOLOG(LOG_INTERP, 1, {
if (instr_get_translation(in) == NULL)
loginst(get_thread_private_dcontext(), 1, in, "translation is NULL");
});
CLIENT_ASSERT(instr_get_translation(in) != NULL,
"translation field must be set for every app instruction");
} else {
/* The meta instr could indeed not affect app state, but
* better I think to assert and make them put in an
* empty restore event callback in that case. */
DOLOG(LOG_INTERP, 1, {
if (instr_get_translation(in) != NULL &&
!instr_is_our_mangling(in) &&
!dr_xl8_hook_exists())
loginst(get_thread_private_dcontext(), 1, in, "translation != NULL");
});
CLIENT_ASSERT(instr_get_translation(in) == NULL ||
instr_is_our_mangling(in) ||
dr_xl8_hook_exists(),
/* FIXME: if multiple clients, we need to check that this
* particular client has the callback: but we have
* no way to do that other than looking at library
* bounds...punting for now */
"a meta instr should not have its translation field "
"set without also having a restore_state callback");
}
}
}
#endif
/* Returns true if the bb hook is called */
bool
instrument_basic_block(dcontext_t *dcontext, app_pc tag, instrlist_t *bb,
bool for_trace, bool translating, dr_emit_flags_t *emitflags)
{
dr_emit_flags_t ret = DR_EMIT_DEFAULT;
/* return false if no BB hooks are registered */
if (bb_callbacks.num == 0)
return false;
if (hide_tag_from_client(tag)) {
LOG(THREAD, LOG_INTERP, 3, "hiding tag "PFX" from client\n", tag);
return false;
}
/* do not expand or up-decode the instrlist, client gets to choose
* whether and how to do that
*/
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\ninstrument_basic_block ******************\n");
LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, bb, THREAD);
#endif
/* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
if (!translating && !for_trace)
dcontext->client_data->mcontext_in_dcontext = true;
/* Note - currently we are couldbelinking and hold the
* bb_building lock when this is called (see PR 227619).
*/
/* We or together the return values */
call_all_ret(ret, |=, , bb_callbacks,
int (*) (void *, void *, instrlist_t *, bool, bool),
(void *)dcontext, (void *)tag, bb, for_trace, translating);
if (emitflags != NULL)
*emitflags = ret;
DOCHECK(1, { check_ilist_translations(bb); });
dcontext->client_data->mcontext_in_dcontext = false;
if (IF_DEBUG_ELSE(for_trace, false)) {
CLIENT_ASSERT(instrlist_get_return_target(bb) == NULL &&
instrlist_get_fall_through_target(bb) == NULL,
"instrlist_set_return/fall_through_target"
" cannot be used on traces");
}
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, bb, THREAD);
#endif
return true;
}
/* Give the user the completely mangled and optimized trace just prior
* to emitting into code cache, user gets final crack at it
*/
dr_emit_flags_t
instrument_trace(dcontext_t *dcontext, app_pc tag, instrlist_t *trace,
bool translating)
{
dr_emit_flags_t ret = DR_EMIT_DEFAULT;
#ifdef UNSUPPORTED_API
instr_t *instr;
#endif
if (trace_callbacks.num == 0)
return DR_EMIT_DEFAULT;
/* do not expand or up-decode the instrlist, client gets to choose
* whether and how to do that
*/
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\ninstrument_trace ******************\n");
LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, trace, THREAD);
#endif
/* We always pass Level 3 instrs to the client, since we no longer
* expose the expansion routines.
*/
#ifdef UNSUPPORTED_API
for (instr = instrlist_first_expanded(dcontext, trace);
instr != NULL;
instr = instr_get_next_expanded(dcontext, trace, instr)) {
instr_decode(dcontext, instr);
}
/* ASSUMPTION: all ctis are already at Level 3, so we don't have
* to do a separate pass to fix up intra-list targets like
* instrlist_decode_cti() does
*/
#endif
/* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
if (!translating)
dcontext->client_data->mcontext_in_dcontext = true;
/* We or together the return values */
call_all_ret(ret, |=, , trace_callbacks,
int (*)(void *, void *, instrlist_t *, bool),
(void *)dcontext, (void *)tag, trace, translating);
DOCHECK(1, { check_ilist_translations(trace); });
CLIENT_ASSERT(instrlist_get_return_target(trace) == NULL &&
instrlist_get_fall_through_target(trace) == NULL,
"instrlist_set_return/fall_through_target"
" cannot be used on traces");
dcontext->client_data->mcontext_in_dcontext = false;
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, trace, THREAD);
#endif
return ret;
}
/* Notify user when a fragment is deleted from the cache
* FIXME PR 242544: how does user know whether this is a shadowed copy or the
* real thing? The user might free memory that shouldn't be freed!
*/
void
instrument_fragment_deleted(dcontext_t *dcontext, app_pc tag, uint flags)
{
if (fragdel_callbacks.num == 0)
return;
#ifdef WINDOWS
/* Case 10009: We don't call the basic block hook for blocks that
* are jumps to the interception buffer, so we'll hide them here
* as well.
*/
if (!TEST(FRAG_IS_TRACE, flags) && hide_tag_from_client(tag))
return;
#endif
/* PR 243008: we don't expose GLOBAL_DCONTEXT, so change to NULL.
* Our comments warn the user about this.
*/
if (dcontext == GLOBAL_DCONTEXT)
dcontext = NULL;
call_all(fragdel_callbacks, int (*)(void *, void *),
(void *)dcontext, (void *)tag);
}
bool
instrument_restore_state(dcontext_t *dcontext, bool restore_memory,
dr_restore_state_info_t *info)
{
bool res = true;
/* Support both legacy and extended handlers */
if (restore_state_callbacks.num > 0) {
call_all(restore_state_callbacks,
int (*)(void *, void *, dr_mcontext_t *, bool, bool),
(void *)dcontext, info->fragment_info.tag, info->mcontext,
restore_memory, info->fragment_info.app_code_consistent);
}
if (restore_state_ex_callbacks.num > 0) {
/* i#220/PR 480565: client has option of failing the translation.
* We fail it if any client wants to, short-circuiting in that case.
* This does violate the "priority order" of events where the
* last one is supposed to have final say b/c it won't even
* see the event (xref i#424).
*/
call_all_ret(res, = res &&, , restore_state_ex_callbacks,
int (*)(void *, bool, dr_restore_state_info_t *),
(void *)dcontext, restore_memory, info);
}
CLIENT_ASSERT(!restore_memory || res,
"translation should not fail for restore_memory=true");
return res;
}
#ifdef CUSTOM_TRACES
/* Ask whether to end trace prior to adding next_tag fragment.
* Return values:
* CUSTOM_TRACE_DR_DECIDES = use standard termination criteria
* CUSTOM_TRACE_END_NOW = end trace
* CUSTOM_TRACE_CONTINUE = do not end trace
*/
dr_custom_trace_action_t
instrument_end_trace(dcontext_t *dcontext, app_pc trace_tag, app_pc next_tag)
{
dr_custom_trace_action_t ret = CUSTOM_TRACE_DR_DECIDES;
if (end_trace_callbacks.num == 0)
return ret;
/* Highest priority callback decides how to end the trace (see
* call_all_ret implementation)
*/
call_all_ret(ret, =, , end_trace_callbacks, int (*)(void *, void *, void *),
(void *)dcontext, (void *)trace_tag, (void *)next_tag);
return ret;
}
#endif
static module_data_t *
create_and_initialize_module_data(app_pc start, app_pc end, app_pc entry_point,
uint flags, const module_names_t *names,
const char *full_path
#ifdef WINDOWS
, version_number_t file_version,
version_number_t product_version,
uint checksum, uint timestamp,
size_t mod_size
#else
, bool contiguous,
uint num_segments,
module_segment_t *os_segments,
module_segment_data_t *segments,
uint timestamp
# ifdef MACOS
, uint current_version,
uint compatibility_version,
const byte uuid[16]
# endif
#endif
)
{
#ifndef WINDOWS
uint i;
#endif
module_data_t *copy = (module_data_t *)
HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, module_data_t, ACCT_CLIENT, UNPROTECTED);
memset(copy, 0, sizeof(module_data_t));
copy->start = start;
copy->end = end;
copy->entry_point = entry_point;
copy->flags = flags;
if (full_path != NULL)
copy->full_path = dr_strdup(full_path HEAPACCT(ACCT_CLIENT));
if (names->module_name != NULL)
copy->names.module_name = dr_strdup(names->module_name HEAPACCT(ACCT_CLIENT));
if (names->file_name != NULL)
copy->names.file_name = dr_strdup(names->file_name HEAPACCT(ACCT_CLIENT));
#ifdef WINDOWS
if (names->exe_name != NULL)
copy->names.exe_name = dr_strdup(names->exe_name HEAPACCT(ACCT_CLIENT));
if (names->rsrc_name != NULL)
copy->names.rsrc_name = dr_strdup(names->rsrc_name HEAPACCT(ACCT_CLIENT));
copy->file_version = file_version;
copy->product_version = product_version;
copy->checksum = checksum;
copy->timestamp = timestamp;
copy->module_internal_size = mod_size;
#else
copy->contiguous = contiguous;
copy->num_segments = num_segments;
copy->segments = (module_segment_data_t *)
HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, module_segment_data_t,
num_segments, ACCT_VMAREAS, PROTECTED);
if (os_segments != NULL) {
ASSERT(segments == NULL);
for (i = 0; i < num_segments; i++) {
copy->segments[i].start = os_segments[i].start;
copy->segments[i].end = os_segments[i].end;
copy->segments[i].prot = os_segments[i].prot;
}
} else {
ASSERT(segments != NULL);
if (segments != NULL)
memcpy(copy->segments, segments, num_segments*sizeof(module_segment_data_t));
}
copy->timestamp = timestamp;
# ifdef MACOS
copy->current_version = current_version;
copy->compatibility_version = compatibility_version;
memcpy(copy->uuid, uuid, sizeof(copy->uuid));
# endif
#endif
return copy;
}
module_data_t *
copy_module_area_to_module_data(const module_area_t *area)
{
if (area == NULL)
return NULL;
return create_and_initialize_module_data(area->start, area->end, area->entry_point,
0, &area->names, area->full_path
#ifdef WINDOWS
, area->os_data.file_version,
area->os_data.product_version,
area->os_data.checksum,
area->os_data.timestamp,
area->os_data.module_internal_size
#else
, area->os_data.contiguous,
area->os_data.num_segments,
area->os_data.segments,
NULL,
area->os_data.timestamp
# ifdef MACOS
, area->os_data.current_version,
area->os_data.compatibility_version,
area->os_data.uuid
# endif
#endif
);
}
DR_API
/* Makes a copy of a module_data_t for returning to the client. We return a copy so
* we don't have to hold the module areas list lock while in the client (xref PR 225020).
* Note - dr_data is allowed to be NULL. */
module_data_t *
dr_copy_module_data(const module_data_t *data)
{
if (data == NULL)
return NULL;
return create_and_initialize_module_data(data->start, data->end, data->entry_point,
0, &data->names, data->full_path
#ifdef WINDOWS
, data->file_version,
data->product_version,
data->checksum, data->timestamp,
data->module_internal_size
#else
, data->contiguous,
data->num_segments,
NULL,
data->segments,
data->timestamp
# ifdef MACOS
, data->current_version,
data->compatibility_version,
data->uuid
# endif
#endif
);
}
DR_API
/* Used to free a module_data_t created by dr_copy_module_data() */
void
dr_free_module_data(module_data_t *data)
{
dcontext_t *dcontext = get_thread_private_dcontext();
if (data == NULL)
return;
if (dcontext != NULL && data == dcontext->client_data->no_delete_mod_data) {
CLIENT_ASSERT(false, "dr_free_module_data: don\'t free module_data passed to "
"the image load or image unload event callbacks.");
return;
}
#ifdef UNIX
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, data->segments, module_segment_data_t,
data->num_segments, ACCT_VMAREAS, PROTECTED);
#endif
if (data->full_path != NULL)
dr_strfree(data->full_path HEAPACCT(ACCT_CLIENT));
free_module_names(&data->names HEAPACCT(ACCT_CLIENT));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, data, module_data_t, ACCT_CLIENT, UNPROTECTED);
}
DR_API
bool
dr_module_contains_addr(const module_data_t *data, app_pc addr)
{
/* XXX: this duplicates module_contains_addr(), but we have two different
* data structures (module_area_t and module_data_t) so it's hard to share.
*/
#ifdef WINDOWS
return (addr >= data->start && addr < data->end);
#else
if (data->contiguous)
return (addr >= data->start && addr < data->end);
else {
uint i;
for (i = 0; i < data->num_segments; i++) {
if (addr >= data->segments[i].start && addr < data->segments[i].end)
return true;
}
}
return false;
#endif
}
/* Looks up module containing pc (assumed to be fully loaded).
* If it exists and its client module load event has not been called, calls it.
*/
void
instrument_module_load_trigger(app_pc pc)
{
if (CLIENTS_EXIST()) {
module_area_t *ma;
module_data_t *client_data = NULL;
os_get_module_info_lock();
ma = module_pc_lookup(pc);
if (ma != NULL && !TEST(MODULE_LOAD_EVENT, ma->flags)) {
/* switch to write lock */
os_get_module_info_unlock();
os_get_module_info_write_lock();
ma = module_pc_lookup(pc);
if (ma != NULL && !TEST(MODULE_LOAD_EVENT, ma->flags)) {
ma->flags |= MODULE_LOAD_EVENT;
client_data = copy_module_area_to_module_data(ma);
os_get_module_info_write_unlock();
instrument_module_load(client_data, true/*i#884: already loaded*/);
dr_free_module_data(client_data);
} else
os_get_module_info_write_unlock();
} else
os_get_module_info_unlock();
}
}
/* Notify user when a module is loaded */
void
instrument_module_load(module_data_t *data, bool previously_loaded)
{
/* Note - during DR initialization this routine is called before we've set up a
* dcontext for the main thread and before we've called instrument_init. It's okay
* since there's no way a callback will be registered and we'll return immediately. */
dcontext_t *dcontext;
if (module_load_callbacks.num == 0)
return;
dcontext = get_thread_private_dcontext();
/* client shouldn't delete this */
dcontext->client_data->no_delete_mod_data = data;
call_all(module_load_callbacks, int (*)(void *, module_data_t *, bool),
(void *)dcontext, data, previously_loaded);
dcontext->client_data->no_delete_mod_data = NULL;
}
/* Notify user when a module is unloaded */
void
instrument_module_unload(module_data_t *data)
{
dcontext_t *dcontext;
if (module_unload_callbacks.num == 0)
return;
dcontext = get_thread_private_dcontext();
/* client shouldn't delete this */
dcontext->client_data->no_delete_mod_data = data;
call_all(module_unload_callbacks, int (*)(void *, module_data_t *),
(void *)dcontext, data);
dcontext->client_data->no_delete_mod_data = NULL;
}
/* returns whether this sysnum should be intercepted */
bool
instrument_filter_syscall(dcontext_t *dcontext, int sysnum)
{
bool ret = false;
/* if client does not filter then we don't intercept anything */
if (filter_syscall_callbacks.num == 0)
return ret;
/* if any client wants to intercept, then we intercept */
call_all_ret(ret, =, || ret, filter_syscall_callbacks, bool (*)(void *, int),
(void *)dcontext, sysnum);
return ret;
}
/* returns whether this syscall should execute */
bool
instrument_pre_syscall(dcontext_t *dcontext, int sysnum)
{
bool exec = true;
dcontext->client_data->in_pre_syscall = true;
/* clear flag from dr_syscall_invoke_another() */
dcontext->client_data->invoke_another_syscall = false;
if (pre_syscall_callbacks.num > 0) {
/* Skip syscall if any client wants to skip it, but don't short-circuit,
* as skipping syscalls is usually done when the effect of the syscall
* will be emulated in some other way. The app is typically meant to
* think that the syscall succeeded. Thus, other tool components
* should see the syscall as well (xref i#424).
*/
call_all_ret(exec, =, && exec, pre_syscall_callbacks,
bool (*)(void *, int), (void *)dcontext, sysnum);
}
dcontext->client_data->in_pre_syscall = false;
return exec;
}
void
instrument_post_syscall(dcontext_t *dcontext, int sysnum)
{
if (post_syscall_callbacks.num == 0)
return;
dcontext->client_data->in_post_syscall = true;
call_all(post_syscall_callbacks, int (*)(void *, int),
(void *)dcontext, sysnum);
dcontext->client_data->in_post_syscall = false;
}
bool
instrument_invoke_another_syscall(dcontext_t *dcontext)
{
return dcontext->client_data->invoke_another_syscall;
}
bool
instrument_kernel_xfer(dcontext_t *dcontext, dr_kernel_xfer_type_t type,
os_cxt_ptr_t source_os_cxt, dr_mcontext_t *source_dmc,
priv_mcontext_t *source_mc,
app_pc target_pc, reg_t target_xsp,
os_cxt_ptr_t target_os_cxt, priv_mcontext_t *target_mc,
int sig)
{
if (kernel_xfer_callbacks.num == 0) {
return false;
}
dr_kernel_xfer_info_t info;
info.type = type;
info.source_mcontext = NULL;
info.target_pc = target_pc;
info.target_xsp = target_xsp;
info.sig = sig;
dr_mcontext_t dr_mcontext;
dr_mcontext.size = sizeof(dr_mcontext);
dr_mcontext.flags = DR_MC_CONTROL | DR_MC_INTEGER;
if (source_dmc != NULL)
info.source_mcontext = source_dmc;
else if (source_mc != NULL) {
if (priv_mcontext_to_dr_mcontext(&dr_mcontext, source_mc))
info.source_mcontext = &dr_mcontext;
} else if (!is_os_cxt_ptr_null(source_os_cxt)) {
if (os_context_to_mcontext(&dr_mcontext, NULL, source_os_cxt))
info.source_mcontext = &dr_mcontext;
}
/* Our compromise to reduce context copying is to provide the PC and XSP inline,
* and only get more if the user calls dr_get_mcontext(), which we support again
* without any copying if not used by taking in a raw os_context_t.
*/
dcontext->client_data->os_cxt = target_os_cxt;
dcontext->client_data->cur_mc = target_mc;
call_all(kernel_xfer_callbacks, int (*)(void *, const dr_kernel_xfer_info_t *),
(void *)dcontext, &info);
set_os_cxt_ptr_null(&dcontext->client_data->os_cxt);
dcontext->client_data->cur_mc = NULL;
return true;
}
#ifdef WINDOWS
/* Notify user of exceptions. Note: not called for RaiseException */
bool
instrument_exception(dcontext_t *dcontext, dr_exception_t *exception)
{
bool res = true;
/* Ensure that dr_get_mcontext() called from instrument_kernel_xfer() from
* dr_redirect_execution() will get the source context.
* cur_mc will later be clobbered by instrument_kernel_xfer() which is ok:
* the redirect ends the callback calling.
*/
dcontext->client_data->cur_mc = dr_mcontext_as_priv_mcontext(exception->mcontext);
/* We short-circuit if any client wants to "own" the fault and not pass on.
* This does violate the "priority order" of events where the last one is
* supposed to have final say b/c it won't even see the event: but only one
* registrant should own it (xref i#424).
*/
call_all_ret(res, = res &&, , exception_callbacks,
bool (*)(void *, dr_exception_t *),
(void *)dcontext, exception);
dcontext->client_data->cur_mc = NULL;
return res;
}
#else
dr_signal_action_t
instrument_signal(dcontext_t *dcontext, dr_siginfo_t *siginfo)
{
dr_signal_action_t ret = DR_SIGNAL_DELIVER;
/* We short-circuit if any client wants to do other than deliver to the app.
* This does violate the "priority order" of events where the last one is
* supposed to have final say b/c it won't even see the event: but only one
* registrant should own the signal (xref i#424).
*/
call_all_ret(ret, = ret == DR_SIGNAL_DELIVER ? , : ret, signal_callbacks,
dr_signal_action_t (*)(void *, dr_siginfo_t *),
(void *)dcontext, siginfo);
return ret;
}
bool
dr_signal_hook_exists(void)
{
return (signal_callbacks.num > 0);
}
#endif /* WINDOWS */
#ifdef PROGRAM_SHEPHERDING
/* Notify user when a security violation is detected */
void
instrument_security_violation(dcontext_t *dcontext, app_pc target_pc,
security_violation_t violation, action_type_t *action)
{
dr_security_violation_type_t dr_violation;
dr_security_violation_action_t dr_action, dr_action_original;
app_pc source_pc = NULL;
fragment_t *last;
dr_mcontext_t dr_mcontext;
dr_mcontext_init(&dr_mcontext);
if (security_violation_callbacks.num == 0)
return;
if (!priv_mcontext_to_dr_mcontext(&dr_mcontext, get_mcontext(dcontext)))
return;
/* FIXME - the source_tag, source_pc, and context can all be incorrect if the
* violation ends up occurring in the middle of a bb we're building. See case
* 7380 which we should fix in interp.c.
*/
/* Obtain the source addr to pass to the client. xref case 285 --
* we're using the more heavy-weight solution 2) here, but that
* should be okay since we already have the overhead of calling
* into the client. */
last = dcontext->last_fragment;
if (!TEST(FRAG_FAKE, last->flags)) {
cache_pc pc = EXIT_CTI_PC(last, dcontext->last_exit);
source_pc = recreate_app_pc(dcontext, pc, last);
}
/* FIXME - set pc field of dr_mcontext_t. We'll probably want it
* for thread start and possibly apc/callback events as well.
*/
switch (violation) {
case STACK_EXECUTION_VIOLATION:
dr_violation = DR_RCO_STACK_VIOLATION;
break;
case HEAP_EXECUTION_VIOLATION:
dr_violation = DR_RCO_HEAP_VIOLATION;
break;
case RETURN_TARGET_VIOLATION:
dr_violation = DR_RCT_RETURN_VIOLATION;
break;
case RETURN_DIRECT_RCT_VIOLATION:
ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
dr_violation = DR_UNKNOWN_VIOLATION;
break;
case INDIRECT_CALL_RCT_VIOLATION:
dr_violation = DR_RCT_INDIRECT_CALL_VIOLATION;
break;
case INDIRECT_JUMP_RCT_VIOLATION:
dr_violation = DR_RCT_INDIRECT_JUMP_VIOLATION;
break;
default:
ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
dr_violation = DR_UNKNOWN_VIOLATION;
break;
}
switch (*action) {
case ACTION_TERMINATE_PROCESS:
dr_action = DR_VIOLATION_ACTION_KILL_PROCESS;
break;
case ACTION_CONTINUE:
dr_action = DR_VIOLATION_ACTION_CONTINUE;
break;
case ACTION_TERMINATE_THREAD:
dr_action = DR_VIOLATION_ACTION_KILL_THREAD;
break;
case ACTION_THROW_EXCEPTION:
dr_action = DR_VIOLATION_ACTION_THROW_EXCEPTION;
break;
default:
ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
dr_action = DR_VIOLATION_ACTION_CONTINUE;
break;
}
dr_action_original = dr_action;
/* NOTE - last->tag should be valid here (even if the frag is fake since the
* coarse wrappers set the tag). FIXME - for traces we really want the bb tag not
* the trace tag, should get that. Of course the only real reason we pass source
* tag is because we can't always give a valid source_pc. */
/* Note that the last registered function gets the final crack at
* changing the action.
*/
call_all(security_violation_callbacks,
int (*)(void *, void *, app_pc, app_pc, dr_security_violation_type_t,
dr_mcontext_t *, dr_security_violation_action_t *),
(void *)dcontext, last->tag, source_pc, target_pc,
dr_violation, &dr_mcontext, &dr_action);
if (dr_action != dr_action_original) {
switch (dr_action) {
case DR_VIOLATION_ACTION_KILL_PROCESS:
*action = ACTION_TERMINATE_PROCESS;
break;
case DR_VIOLATION_ACTION_KILL_THREAD:
*action = ACTION_TERMINATE_THREAD;
break;
case DR_VIOLATION_ACTION_THROW_EXCEPTION:
*action = ACTION_THROW_EXCEPTION;
break;
case DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT:
/* FIXME - not safe to implement till case 7380 is fixed. */
CLIENT_ASSERT(false, "action DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT "
"not yet supported.");
/* note - no break, fall through */
case DR_VIOLATION_ACTION_CONTINUE:
*action = ACTION_CONTINUE;
break;
default:
CLIENT_ASSERT(false, "Security violation event callback returned invalid "
"action value.");
}
}
}
#endif
/* Notify the client of a nudge. */
void
instrument_nudge(dcontext_t *dcontext, client_id_t id, uint64 arg)
{
size_t i;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL && dcontext != GLOBAL_DCONTEXT &&
dcontext == get_thread_private_dcontext());
/* synch_with_all_threads and flush API assume that client nudge threads
* hold no dr locks and are !couldbelinking while in client lib code */
ASSERT_OWN_NO_LOCKS();
ASSERT(!is_couldbelinking(dcontext));
/* find the client the nudge is intended for */
for (i=0; i<num_client_libs; i++) {
/* until we have nudge-arg support (PR 477454), nudges target the 1st client */
if (IF_VMX86_ELSE(true, client_libs[i].id == id)) {
break;
}
}
if (i == num_client_libs || client_libs[i].nudge_callbacks.num == 0)
return;
#ifdef WINDOWS
/* count the number of nudge events so we can make sure they're
* all finished before exiting
*/
mutex_lock(&client_thread_count_lock);
if (block_client_nudge_threads) {
/* FIXME - would be nice if there was a way to let the external agent know that
* the nudge event wasn't delivered (but this only happens when the process
* is detaching or exiting). */
mutex_unlock(&client_thread_count_lock);
return;
}
/* atomic to avoid locking around the dec */
ATOMIC_INC(int, num_client_nudge_threads);
mutex_unlock(&client_thread_count_lock);
/* We need to mark this as a client controlled thread for synch_with_all_threads
* and otherwise treat it as native. Xref PR 230836 on what to do if this
* thread hits native_exec_syscalls hooks.
* XXX: this requires extra checks for "not a nudge thread" after IS_CLIENT_THREAD
* in get_stack_bounds() and instrument_thread_exit_event(): maybe better
* to have synchall checks do extra checks and have IS_CLIENT_THREAD be
* false for nudge threads at exit time?
*/
dcontext->client_data->is_client_thread = true;
dcontext->thread_record->under_dynamo_control = false;
#else
/* support calling dr_get_mcontext() on this thread. the app
* context should be intact in the current mcontext except
* pc which we set from next_tag.
*/
CLIENT_ASSERT(!dcontext->client_data->mcontext_in_dcontext,
"internal inconsistency in where mcontext is");
dcontext->client_data->mcontext_in_dcontext = true;
/* officially get_mcontext() doesn't always set pc: we do anyway */
get_mcontext(dcontext)->pc = dcontext->next_tag;
#endif
call_all(client_libs[i].nudge_callbacks, int (*)(void *, uint64),
(void *)dcontext, arg);
#ifdef UNIX
dcontext->client_data->mcontext_in_dcontext = false;
#else
dcontext->thread_record->under_dynamo_control = true;
dcontext->client_data->is_client_thread = false;
ATOMIC_DEC(int, num_client_nudge_threads);
#endif
}
int
get_num_client_threads(void)
{
int num = IF_WINDOWS_ELSE(num_client_nudge_threads, 0);
# ifdef CLIENT_SIDELINE
num += num_client_sideline_threads;
# endif
return num;
}
#ifdef WINDOWS
/* wait for all nudges to finish */
void
wait_for_outstanding_nudges()
{
/* block any new nudge threads from starting */
mutex_lock(&client_thread_count_lock);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
block_client_nudge_threads = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
DOLOG(1, LOG_TOP, {
if (num_client_nudge_threads > 0) {
LOG(GLOBAL, LOG_TOP, 1,
"Waiting for %d nudges to finish - app is about to kill all threads "
"except the current one.\n", num_client_nudge_threads);
}
});
/* don't wait if the client requested exit: after all the client might
* have done so from a nudge, and if the client does want to exit it's
* its own problem if it misses nudges (and external nudgers should use
* a finite timeout)
*/
if (client_requested_exit) {
mutex_unlock(&client_thread_count_lock);
return;
}
while (num_client_nudge_threads > 0) {
/* yield with lock released to allow nudges to finish */
mutex_unlock(&client_thread_count_lock);
dr_thread_yield();
mutex_lock(&client_thread_count_lock);
}
mutex_unlock(&client_thread_count_lock);
}
#endif /* WINDOWS */
/****************************************************************************/
/* EXPORTED ROUTINES */
DR_API
/* Creates a DR context that can be used in a standalone program.
* WARNING: this context cannot be used as the drcontext for a thread
* running under DR control! It is only for standalone programs that
* wish to use DR as a library of disassembly, etc. routines.
*/
void *
dr_standalone_init(void)
{
dcontext_t *dcontext = standalone_init();
return (void *) dcontext;
}
DR_API
/* Aborts the process immediately */
void
dr_abort(void)
{
if (TEST(DUMPCORE_DR_ABORT, dynamo_options.dumpcore_mask))
os_dump_core("dr_abort");
os_terminate(NULL, TERMINATE_PROCESS);
}
DR_API
void
dr_exit_process(int exit_code)
{
dcontext_t *dcontext = get_thread_private_dcontext();
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
/* Prevent cleanup from waiting for nudges as this may be called
* from a nudge!
* Also suppress leak asserts, as it's hard to clean up from
* some situations (such as DrMem -crash_at_error).
*/
client_requested_exit = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
#ifdef WINDOWS
if (dcontext != NULL && dcontext->nudge_target != NULL) {
        /* we need to free the nudge thread stack which may involve
* switching stacks so we have the nudge thread invoke
* os_terminate for us
*/
nudge_thread_cleanup(dcontext, true/*kill process*/, exit_code);
CLIENT_ASSERT(false, "shouldn't get here");
}
#endif
if (!is_currently_on_dstack(dcontext)
IF_UNIX(&& !is_currently_on_sigaltstack(dcontext))) {
/* if on app stack or sigaltstack, avoid incorrect leak assert at exit */
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
dr_api_exit = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); /* to keep properly nested */
}
os_terminate_with_code(dcontext, /* dcontext is required */
TERMINATE_CLEANUP|TERMINATE_PROCESS, exit_code);
CLIENT_ASSERT(false, "shouldn't get here");
}
DR_API
bool
dr_create_memory_dump(dr_memory_dump_spec_t *spec)
{
if (spec->size != sizeof(dr_memory_dump_spec_t))
return false;
#ifdef WINDOWS
if (TEST(DR_MEMORY_DUMP_LDMP, spec->flags))
return os_dump_core_live(spec->label, spec->ldmp_path, spec->ldmp_path_size);
#endif
return false;
}
DR_API
/* Returns true if all DynamoRIO caches are thread private. */
bool
dr_using_all_private_caches(void)
{
return !SHARED_FRAGMENTS_ENABLED();
}
DR_API
void
dr_request_synchronized_exit(void)
{
SYSLOG_INTERNAL_WARNING_ONCE("dr_request_synchronized_exit deprecated: "
"use dr_set_process_exit_behavior instead");
}
DR_API
void
dr_set_process_exit_behavior(dr_exit_flags_t flags)
{
if ((!DYNAMO_OPTION(multi_thread_exit) && TEST(DR_EXIT_MULTI_THREAD, flags)) ||
(DYNAMO_OPTION(multi_thread_exit) && !TEST(DR_EXIT_MULTI_THREAD, flags))) {
options_make_writable();
dynamo_options.multi_thread_exit = TEST(DR_EXIT_MULTI_THREAD, flags);
options_restore_readonly();
}
if ((!DYNAMO_OPTION(skip_thread_exit_at_exit) &&
TEST(DR_EXIT_SKIP_THREAD_EXIT, flags)) ||
(DYNAMO_OPTION(skip_thread_exit_at_exit) &&
!TEST(DR_EXIT_SKIP_THREAD_EXIT, flags))) {
options_make_writable();
dynamo_options.skip_thread_exit_at_exit = TEST(DR_EXIT_SKIP_THREAD_EXIT, flags);
options_restore_readonly();
}
}
void
dr_allow_unsafe_static_behavior(void)
{
loader_allow_unsafe_static_behavior();
}
DR_API
/* Returns the option string passed along with a client path via DR's
* -client_lib option.
*/
/* i#1736: we now token-delimit with quotes, but for backward compat we need to
* pass a version w/o quotes for dr_get_options().
*/
const char *
dr_get_options(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
/* If we already converted, pass the result */
if (client_libs[i].legacy_options[0] != '\0' ||
client_libs[i].options[0] == '\0')
return client_libs[i].legacy_options;
/* For backward compatibility, we need to remove the token-delimiting
* quotes. We tokenize, and then re-assemble the flat string.
* i#1755: however, for legacy custom frontends that are not re-quoting
* like drrun now is, we need to avoid removing any quotes from the
* original strings. We try to detect this by assuming a frontend will
* either re-quote everything or nothing. Ideally we would check all
* args, but that would require plumbing info from getword() or
* duplicating its functionality: so instead our heuristic is just checking
* the first and last chars.
*/
if (!char_is_quote(client_libs[i].options[0]) ||
                /* Empty string already detected above */
!char_is_quote(client_libs[i].options[strlen(client_libs[i].
options)-1])) {
/* At least one arg is not quoted => better use original */
snprintf(client_libs[i].legacy_options,
BUFFER_SIZE_ELEMENTS(client_libs[i].legacy_options),
"%s", client_libs[i].options);
} else {
int j;
size_t sofar = 0;
for (j = 1/*skip client lib*/; j < client_libs[i].argc; j++) {
if (!print_to_buffer(client_libs[i].legacy_options,
BUFFER_SIZE_ELEMENTS(client_libs[i].
legacy_options),
&sofar, "%s%s", (j == 1) ? "" : " ",
client_libs[i].argv[j]))
break;
}
}
NULL_TERMINATE_BUFFER(client_libs[i].legacy_options);
return client_libs[i].legacy_options;
}
}
CLIENT_ASSERT(false, "dr_get_options(): invalid client id");
return NULL;
}
DR_API
bool
dr_get_option_array(client_id_t id, int *argc OUT, const char ***argv OUT)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
*argc = client_libs[i].argc;
*argv = client_libs[i].argv;
return true;
}
}
CLIENT_ASSERT(false, "dr_get_option_array(): invalid client id");
return false;
}
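/* Illustrative usage sketch (hypothetical client code; "id" is the client_id_t
 * handed to the client at initialization). argv[0] is the client library path,
 * so real options start at index 1:
 *   int argc;
 *   const char **argv;
 *   if (dr_get_option_array(id, &argc, &argv)) {
 *       int i;
 *       for (i = 1; i < argc; i++)
 *           dr_fprintf(STDERR, "client option: %s\n", argv[i]);
 *   }
 */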
DR_API
/* Returns the path to the client library. Client must pass its ID */
const char *
dr_get_client_path(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return client_libs[i].path;
}
}
CLIENT_ASSERT(false, "dr_get_client_path(): invalid client id");
return NULL;
}
DR_API
byte *
dr_get_client_base(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return client_libs[i].start;
}
}
CLIENT_ASSERT(false, "dr_get_client_base(): invalid client id");
return NULL;
}
DR_API
bool
dr_set_client_name(const char *name, const char *report_URL)
{
/* Although set_exception_strings() accepts NULL, clients should pass real vals. */
if (name == NULL || report_URL == NULL)
return false;
set_exception_strings(name, report_URL);
return true;
}
bool
dr_set_client_version_string(const char *version)
{
if (version == NULL)
return false;
set_display_version(version);
return true;
}
DR_API const char *
dr_get_application_name(void)
{
#ifdef UNIX
return get_application_short_name();
#else
return get_application_short_unqualified_name();
#endif
}
DR_API process_id_t
dr_get_process_id(void)
{
return (process_id_t) get_process_id();
}
#ifdef UNIX
DR_API
process_id_t
dr_get_parent_id(void)
{
return get_parent_id();
}
#endif
#ifdef WINDOWS
DR_API
process_id_t
dr_convert_handle_to_pid(HANDLE process_handle)
{
ASSERT(POINTER_MAX == INVALID_PROCESS_ID);
return process_id_from_handle(process_handle);
}
DR_API
HANDLE
dr_convert_pid_to_handle(process_id_t pid)
{
return process_handle_from_id(pid);
}
DR_API
/**
* Returns information about the version of the operating system.
* Returns whether successful.
*/
bool
dr_get_os_version(dr_os_version_info_t *info)
{
int ver;
uint sp_major, sp_minor;
get_os_version_ex(&ver, &sp_major, &sp_minor);
if (info->size > offsetof(dr_os_version_info_t, version)) {
switch (ver) {
case WINDOWS_VERSION_10_1709: info->version = DR_WINDOWS_VERSION_10_1709; break;
case WINDOWS_VERSION_10_1703: info->version = DR_WINDOWS_VERSION_10_1703; break;
case WINDOWS_VERSION_10_1607: info->version = DR_WINDOWS_VERSION_10_1607; break;
case WINDOWS_VERSION_10_1511: info->version = DR_WINDOWS_VERSION_10_1511; break;
case WINDOWS_VERSION_10: info->version = DR_WINDOWS_VERSION_10; break;
case WINDOWS_VERSION_8_1: info->version = DR_WINDOWS_VERSION_8_1; break;
case WINDOWS_VERSION_8: info->version = DR_WINDOWS_VERSION_8; break;
case WINDOWS_VERSION_7: info->version = DR_WINDOWS_VERSION_7; break;
case WINDOWS_VERSION_VISTA: info->version = DR_WINDOWS_VERSION_VISTA; break;
case WINDOWS_VERSION_2003: info->version = DR_WINDOWS_VERSION_2003; break;
case WINDOWS_VERSION_XP: info->version = DR_WINDOWS_VERSION_XP; break;
case WINDOWS_VERSION_2000: info->version = DR_WINDOWS_VERSION_2000; break;
case WINDOWS_VERSION_NT: info->version = DR_WINDOWS_VERSION_NT; break;
default: CLIENT_ASSERT(false, "unsupported windows version");
};
} else
return false; /* struct too small for any info */
if (info->size > offsetof(dr_os_version_info_t, service_pack_major)) {
info->service_pack_major = sp_major;
if (info->size > offsetof(dr_os_version_info_t, service_pack_minor)) {
info->service_pack_minor = sp_minor;
}
}
return true;
}
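/* Illustrative caller sketch (hypothetical client code): the caller must set the
 * size field so DR knows how much of the struct it may safely fill in:
 *   dr_os_version_info_t info;
 *   info.size = sizeof(info);
 *   if (dr_get_os_version(&info) && info.version >= DR_WINDOWS_VERSION_8)
 *       ... take a Win8+-specific path ...
 */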
DR_API
bool
dr_is_wow64(void)
{
return is_wow64_process(NT_CURRENT_PROCESS);
}
DR_API
void *
dr_get_app_PEB(void)
{
return get_own_peb();
}
#endif
DR_API
/* Retrieves the current time */
void
dr_get_time(dr_time_t *time)
{
convert_millis_to_date(query_time_millis(), time);
}
DR_API
uint64
dr_get_milliseconds(void)
{
return query_time_millis();
}
DR_API
uint64
dr_get_microseconds(void)
{
return query_time_micros();
}
DR_API
uint
dr_get_random_value(uint max)
{
return (uint) get_random_offset(max);
}
DR_API
void
dr_set_random_seed(uint seed)
{
set_random_seed(seed);
}
DR_API
uint
dr_get_random_seed(void)
{
return get_random_seed();
}
/***************************************************************************
* MEMORY ALLOCATION
*
* XXX i#774: once we split vmheap from vmcode, we need to make
* dr_thread_alloc(), dr_global_alloc(), and dr_nonheap_alloc()
* all allocate vmcode-reachable memory. Library-redirected
* allocations do not need to be reachable.
*/
DR_API
/* Allocates memory from DR's memory pool specific to the
* thread associated with drcontext.
*/
void *
dr_thread_alloc(void *drcontext, size_t size)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
return heap_alloc(dcontext, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Frees thread-specific memory allocated by dr_thread_alloc.
* size must be the same size passed to dr_thread_alloc.
*/
void
dr_thread_free(void *drcontext, void *mem, size_t size)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_thread_free: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_thread_free: drcontext is invalid");
heap_free(dcontext, mem, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Allocates memory from DR's global memory pool.
*/
void *
dr_global_alloc(size_t size)
{
return global_heap_alloc(size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Frees memory allocated by dr_global_alloc.
* size must be the same size passed to dr_global_alloc.
*/
void
dr_global_free(void *mem, size_t size)
{
global_heap_free(mem, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* PR 352427: API routine to allocate executable memory */
void *
dr_nonheap_alloc(size_t size, uint prot)
{
return heap_mmap_ex(size, size, prot, false/*no guard pages*/, VMM_SPECIAL_MMAP);
}
DR_API
void
dr_nonheap_free(void *mem, size_t size)
{
heap_munmap_ex(mem, size, false/*no guard pages*/, VMM_SPECIAL_MMAP);
}
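/* Illustrative pairing sketch (hypothetical client code): dr_nonheap_alloc() is
 * typically used for memory that must be executable, e.g. a small code stub:
 *   byte *stub = (byte *) dr_nonheap_alloc(dr_page_size(),
 *                                          DR_MEMPROT_READ | DR_MEMPROT_WRITE |
 *                                          DR_MEMPROT_EXEC);
 *   ... emit code into stub ...
 *   dr_nonheap_free(stub, dr_page_size());
 */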
static void *
raw_mem_alloc(size_t size, uint prot, void *addr, dr_alloc_flags_t flags)
{
byte *p;
heap_error_code_t error_code;
CLIENT_ASSERT(ALIGNED(addr, PAGE_SIZE), "addr is not page size aligned");
if (!TEST(DR_ALLOC_NON_DR, flags)) {
/* memory alloc/dealloc and updating DR list must be atomic */
dynamo_vm_areas_lock(); /* if already hold lock this is a nop */
}
addr = (void *)ALIGN_BACKWARD(addr, PAGE_SIZE);
size = ALIGN_FORWARD(size, PAGE_SIZE);
#ifdef WINDOWS
if (TEST(DR_ALLOC_LOW_2GB, flags)) {
CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags),
"cannot combine commit-only and low-2GB");
p = os_heap_reserve_in_region(NULL, (byte *)(ptr_uint_t)0x80000000, size,
&error_code, TEST(DR_MEMPROT_EXEC, flags));
if (p != NULL && !TEST(DR_ALLOC_RESERVE_ONLY, flags)) {
if (!os_heap_commit(p, size, prot, &error_code)) {
os_heap_free(p, size, &error_code);
p = NULL;
}
}
} else
#endif
{
/* We specify that DR_ALLOC_LOW_2GB only applies to x64, so it's
* ok that the Linux kernel will ignore MAP_32BIT for 32-bit.
*/
#ifdef UNIX
uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? RAW_ALLOC_32BIT : 0;
#else
uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags) ? RAW_ALLOC_RESERVE_ONLY :
(TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0);
#endif
if (IF_WINDOWS(TEST(DR_ALLOC_COMMIT_ONLY, flags) &&)
addr != NULL &&
!app_memory_pre_alloc(get_thread_private_dcontext(), addr, size, prot,
false))
p = NULL;
else
p = os_raw_mem_alloc(addr, size, prot, os_flags, &error_code);
}
if (p != NULL) {
if (TEST(DR_ALLOC_NON_DR, flags)) {
all_memory_areas_lock();
update_all_memory_areas(p, p+size, prot, DR_MEMTYPE_DATA);
all_memory_areas_unlock();
} else {
/* this routine updates allmem for us: */
add_dynamo_vm_area((app_pc)p, ((app_pc)p)+size, prot,
true _IF_DEBUG("fls cb in private lib"));
}
RSTATS_ADD_PEAK(client_raw_mmap_size, size);
}
if (!TEST(DR_ALLOC_NON_DR, flags))
dynamo_vm_areas_unlock();
return p;
}
static bool
raw_mem_free(void *addr, size_t size, dr_alloc_flags_t flags)
{
bool res;
heap_error_code_t error_code;
byte *p = addr;
#ifdef UNIX
uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? RAW_ALLOC_32BIT : 0;
#else
uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags) ? RAW_ALLOC_RESERVE_ONLY :
(TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0);
#endif
size = ALIGN_FORWARD(size, PAGE_SIZE);
if (TEST(DR_ALLOC_NON_DR, flags)) {
/* use lock to avoid racy update on parallel memory allocation,
* e.g. allocation from another thread at p happens after os_heap_free
* but before remove_from_all_memory_areas
*/
all_memory_areas_lock();
} else {
/* memory alloc/dealloc and updating DR list must be atomic */
dynamo_vm_areas_lock(); /* if already hold lock this is a nop */
}
res = os_raw_mem_free(p, size, os_flags, &error_code);
if (TEST(DR_ALLOC_NON_DR, flags)) {
remove_from_all_memory_areas(p, p + size);
all_memory_areas_unlock();
} else {
/* this routine updates allmem for us: */
remove_dynamo_vm_area((app_pc)addr, ((app_pc)addr)+size);
}
if (!TEST(DR_ALLOC_NON_DR, flags))
dynamo_vm_areas_unlock();
if (res)
RSTATS_SUB(client_raw_mmap_size, size);
return res;
}
DR_API
void *
dr_raw_mem_alloc(size_t size, uint prot, void *addr)
{
return raw_mem_alloc(size, prot, addr, DR_ALLOC_NON_DR);
}
DR_API
bool
dr_raw_mem_free(void *addr, size_t size)
{
return raw_mem_free(addr, size, DR_ALLOC_NON_DR);
}
static void *
custom_memory_shared(bool alloc, void *drcontext, dr_alloc_flags_t flags, size_t size,
uint prot, void *addr, bool *free_res)
{
CLIENT_ASSERT(alloc || free_res != NULL, "must ask for free_res on free");
CLIENT_ASSERT(alloc || addr != NULL, "cannot free NULL");
CLIENT_ASSERT(!TESTALL(DR_ALLOC_NON_DR|DR_ALLOC_CACHE_REACHABLE, flags),
"dr_custom_alloc: cannot combine non-DR and cache-reachable");
CLIENT_ASSERT(!alloc || TEST(DR_ALLOC_FIXED_LOCATION, flags) || addr == NULL,
"dr_custom_alloc: address only honored for fixed location");
#ifdef WINDOWS
CLIENT_ASSERT(!TESTANY(DR_ALLOC_RESERVE_ONLY | DR_ALLOC_COMMIT_ONLY, flags) ||
TESTALL(DR_ALLOC_NON_HEAP|DR_ALLOC_NON_DR, flags),
"dr_custom_alloc: reserve/commit-only are only for non-DR non-heap");
CLIENT_ASSERT(!TEST(DR_ALLOC_RESERVE_ONLY, flags) ||
!TEST(DR_ALLOC_COMMIT_ONLY, flags),
"dr_custom_alloc: cannot combine reserve-only + commit-only");
#endif
if (TEST(DR_ALLOC_NON_HEAP, flags)) {
CLIENT_ASSERT(drcontext == NULL,
"dr_custom_alloc: drcontext must be NULL for non-heap");
CLIENT_ASSERT(!TEST(DR_ALLOC_THREAD_PRIVATE, flags),
"dr_custom_alloc: non-heap cannot be thread-private");
CLIENT_ASSERT(!TESTALL(DR_ALLOC_CACHE_REACHABLE|DR_ALLOC_LOW_2GB, flags),
"dr_custom_alloc: cannot combine low-2GB and cache-reachable");
#ifdef WINDOWS
CLIENT_ASSERT(addr != NULL || !TEST(DR_ALLOC_COMMIT_ONLY, flags),
"dr_custom_alloc: commit-only requires non-NULL addr");
#endif
if (TEST(DR_ALLOC_LOW_2GB, flags)) {
#ifdef WINDOWS
CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags),
"dr_custom_alloc: cannot combine commit-only and low-2GB");
#endif
CLIENT_ASSERT(!alloc || addr == NULL,
"dr_custom_alloc: cannot pass an addr with low-2GB");
/* Even if not non-DR, easier to allocate via raw */
if (alloc)
return raw_mem_alloc(size, prot, addr, flags);
else
*free_res = raw_mem_free(addr, size, flags);
} else if (TEST(DR_ALLOC_NON_DR, flags)) {
/* ok for addr to be NULL */
if (alloc)
return raw_mem_alloc(size, prot, addr, flags);
else
*free_res = raw_mem_free(addr, size, flags);
} else { /* including DR_ALLOC_CACHE_REACHABLE */
CLIENT_ASSERT(!alloc || !TEST(DR_ALLOC_CACHE_REACHABLE, flags) ||
addr == NULL,
"dr_custom_alloc: cannot ask for addr and cache-reachable");
/* This flag is here solely so we know which version of free to call */
if (TEST(DR_ALLOC_FIXED_LOCATION, flags)) {
CLIENT_ASSERT(addr != NULL,
"dr_custom_alloc: fixed location requires an address");
if (alloc)
return raw_mem_alloc(size, prot, addr, 0);
else
*free_res = raw_mem_free(addr, size, 0);
} else {
if (alloc)
return dr_nonheap_alloc(size, prot);
else {
*free_res = true;
dr_nonheap_free(addr, size);
}
}
}
} else {
if (!alloc)
*free_res = true;
CLIENT_ASSERT(!alloc || addr == NULL,
"dr_custom_alloc: cannot pass an addr for heap memory");
CLIENT_ASSERT(drcontext == NULL || TEST(DR_ALLOC_THREAD_PRIVATE, flags),
"dr_custom_alloc: drcontext must be NULL for global heap");
CLIENT_ASSERT(!TEST(DR_ALLOC_LOW_2GB, flags),
"dr_custom_alloc: cannot ask for heap in low 2GB");
CLIENT_ASSERT(!TEST(DR_ALLOC_NON_DR, flags),
"dr_custom_alloc: cannot ask for non-DR heap memory");
/* for now it's all cache-reachable so we ignore DR_ALLOC_CACHE_REACHABLE */
if (TEST(DR_ALLOC_THREAD_PRIVATE, flags)) {
if (alloc)
return dr_thread_alloc(drcontext, size);
else
dr_thread_free(drcontext, addr, size);
} else {
if (alloc)
return dr_global_alloc(size);
else
dr_global_free(addr, size);
}
}
return NULL;
}
DR_API
void *
dr_custom_alloc(void *drcontext, dr_alloc_flags_t flags, size_t size,
uint prot, void *addr)
{
return custom_memory_shared(true, drcontext, flags, size, prot, addr, NULL);
}
DR_API
bool
dr_custom_free(void *drcontext, dr_alloc_flags_t flags, void *addr, size_t size)
{
bool res;
custom_memory_shared(false, drcontext, flags, size, 0, addr, &res);
return res;
}
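/* Illustrative sketch (hypothetical client code): a non-heap, low-2GB allocation
 * must be freed with the same flags used to allocate it:
 *   void *p = dr_custom_alloc(NULL, DR_ALLOC_NON_HEAP | DR_ALLOC_LOW_2GB,
 *                             dr_page_size(), DR_MEMPROT_READ | DR_MEMPROT_WRITE,
 *                             NULL);
 *   ...
 *   dr_custom_free(NULL, DR_ALLOC_NON_HEAP | DR_ALLOC_LOW_2GB, p, dr_page_size());
 */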
#ifdef UNIX
DR_API
/* With ld's -wrap option, we can supply a replacement for malloc.
* This routine allocates memory from DR's global memory pool. Unlike
* dr_global_alloc(), however, we store the size of the allocation in
* the first few bytes so __wrap_free() can retrieve it.
*/
void *
__wrap_malloc(size_t size)
{
return redirect_malloc(size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for realloc.
* This routine allocates memory from DR's global memory pool. Unlike
* dr_global_alloc(), however, we store the size of the allocation in
* the first few bytes so __wrap_free() can retrieve it.
*/
void *
__wrap_realloc(void *mem, size_t size)
{
return redirect_realloc(mem, size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for calloc.
* This routine allocates memory from DR's global memory pool. Unlike
* dr_global_alloc(), however, we store the size of the allocation in
* the first few bytes so __wrap_free() can retrieve it.
*/
void *
__wrap_calloc(size_t nmemb, size_t size)
{
return redirect_calloc(nmemb, size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for free. This
 * routine frees memory allocated by __wrap_malloc(), __wrap_calloc(), or
 * __wrap_realloc() and expects the allocation size to be available in the
 * few bytes before 'mem'.
*/
void
__wrap_free(void *mem)
{
redirect_free(mem);
}
#endif
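/* Illustrative build-time sketch: a client that wants its own malloc/free calls
 * redirected to the wrappers above can link with ld's wrap option, e.g.
 *   -Wl,--wrap=malloc -Wl,--wrap=realloc -Wl,--wrap=calloc -Wl,--wrap=free
 * so that references to malloc() etc. in the client resolve to __wrap_malloc()
 * and friends and thus draw from DR's global heap.
 */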
DR_API
bool
dr_memory_protect(void *base, size_t size, uint new_prot)
{
/* We do allow the client to modify DR memory, for allocating a
* region and later making it unwritable. We should probably
* allow modifying ntdll, since our general model is to trust the
* client and let it shoot itself in the foot, but that would require
* passing in extra args to app_memory_protection_change() to ignore
* the patch_proof_list: and maybe it is safer to disallow client
* from putting hooks in ntdll.
*/
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (!dynamo_vm_area_overlap(base, ((byte *)base) + size)) {
uint mod_prot = new_prot;
uint res = app_memory_protection_change(get_thread_private_dcontext(),
base, size, new_prot, &mod_prot, NULL);
if (res != DO_APP_MEM_PROT_CHANGE) {
if (res == FAIL_APP_MEM_PROT_CHANGE ||
res == PRETEND_APP_MEM_PROT_CHANGE) {
return false;
} else {
/* SUBSET_APP_MEM_PROT_CHANGE should only happen for
* PROGRAM_SHEPHERDING. FIXME: not sure how common
* this will be: for now we just fail.
*/
return false;
}
}
CLIENT_ASSERT(mod_prot == new_prot, "internal error on dr_memory_protect()");
}
return set_protection(base, size, new_prot);
}
DR_API
size_t
dr_page_size(void)
{
return os_page_size();
}
DR_API
/* checks to see that all bytes with addresses from pc to pc+size-1
* are readable and that reading from there won't generate an exception.
*/
bool
dr_memory_is_readable(const byte *pc, size_t size)
{
return is_readable_without_exception(pc, size);
}
DR_API
/* OS neutral memory query for clients, just wrapper around our get_memory_info(). */
bool
dr_query_memory(const byte *pc, byte **base_pc, size_t *size, uint *prot)
{
uint real_prot;
bool res;
#if defined(UNIX) && defined(HAVE_MEMINFO)
/* xref PR 246897 - the cached all memory list can have problems when
     * out-of-process entities change the mappings. For now we use the from-OS
     * version instead (even though it's slower, and only if we have
* HAVE_MEMINFO_MAPS support). FIXME
* XXX i#853: We could decide allmem vs os with the use_all_memory_areas
* option.
*/
res = get_memory_info_from_os(pc, base_pc, size, &real_prot);
#else
res = get_memory_info(pc, base_pc, size, &real_prot);
#endif
if (prot != NULL) {
if (is_pretend_or_executable_writable((app_pc)pc)) {
/* We can't assert there's no DR_MEMPROT_WRITE b/c we mark selfmod
* as executable-but-writable and we'll come here.
*/
real_prot |= DR_MEMPROT_WRITE | DR_MEMPROT_PRETEND_WRITE;
}
*prot = real_prot;
}
return res;
}
DR_API
bool
dr_query_memory_ex(const byte *pc, OUT dr_mem_info_t *info)
{
bool res;
#if defined(UNIX) && defined(HAVE_MEMINFO)
/* PR 246897: all_memory_areas not ready for prime time */
res = query_memory_ex_from_os(pc, info);
#else
res = query_memory_ex(pc, info);
#endif
if (is_pretend_or_executable_writable((app_pc)pc)) {
/* We can't assert there's no DR_MEMPROT_WRITE b/c we mark selfmod
* as executable-but-writable and we'll come here.
*/
info->prot |= DR_MEMPROT_WRITE | DR_MEMPROT_PRETEND_WRITE;
}
return res;
}
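/* Illustrative caller sketch (hypothetical client code):
 *   dr_mem_info_t info;
 *   if (dr_query_memory_ex(pc, &info)) {
 *       dr_fprintf(STDERR, "region " PFX "-" PFX " prot=0x%x\n",
 *                  info.base_pc, info.base_pc + info.size, info.prot);
 *   }
 */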
DR_API
/* Wrapper around our safe_read. Xref P4 198875, placeholder till we have try/except */
bool
dr_safe_read(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
return safe_read_ex(base, size, out_buf, bytes_read);
}
DR_API
/* Wrapper around our safe_write. Xref P4 198875, placeholder till we have try/except */
bool
dr_safe_write(void *base, size_t size, const void *in_buf, size_t *bytes_written)
{
return safe_write_ex(base, size, in_buf, bytes_written);
}
DR_API
void
dr_try_setup(void *drcontext, void **try_cxt)
{
/* Yes we're duplicating the code from the TRY() macro but this
* provides better abstraction and lets us change our impl later
* vs exposing that macro
*/
dcontext_t *dcontext = (dcontext_t *) drcontext;
try_except_context_t *try_state;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL && dcontext == get_thread_private_dcontext());
ASSERT(try_cxt != NULL);
/* We allocate on the heap to avoid having to expose the try_except_context_t
* and dr_jmp_buf_t structs and be tied to their exact layouts.
* The client is likely to allocate memory inside the try anyway
* if doing a decode or something.
*/
try_state = (try_except_context_t *)
HEAP_TYPE_ALLOC(dcontext, try_except_context_t, ACCT_CLIENT, PROTECTED);
*try_cxt = try_state;
try_state->prev_context = dcontext->try_except.try_except_state;
dcontext->try_except.try_except_state = try_state;
}
/* dr_try_start() is in x86.asm since we can't have an extra frame that's
* going to be torn down between the longjmp and the restore point
*/
DR_API
void
dr_try_stop(void *drcontext, void *try_cxt)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
try_except_context_t *try_state = (try_except_context_t *) try_cxt;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL && dcontext == get_thread_private_dcontext());
ASSERT(try_state != NULL);
POP_TRY_BLOCK(&dcontext->try_except, *try_state);
HEAP_TYPE_FREE(dcontext, try_state, try_except_context_t, ACCT_CLIENT, PROTECTED);
}
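/* Illustrative raw-usage sketch (hypothetical client code); most clients use the
 * DR_TRY_EXCEPT() convenience macro rather than writing this out by hand:
 *   void *try_cxt;
 *   dr_try_setup(drcontext, &try_cxt);
 *   if (dr_try_start(try_cxt) == 0) {
 *       ... code that may fault ...
 *       dr_try_stop(drcontext, try_cxt);
 *   } else {
 *       dr_try_stop(drcontext, try_cxt);   // roll back the try state first
 *       ... fault recovery path ...
 *   }
 */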
DR_API
bool
dr_memory_is_dr_internal(const byte *pc)
{
return is_dynamo_address((app_pc)pc);
}
DR_API
bool
dr_memory_is_in_client(const byte *pc)
{
return is_in_client_lib((app_pc)pc);
}
void
instrument_client_lib_loaded(byte *start, byte *end)
{
/* i#852: include Extensions as they are really part of the clients and
* aren't like other private libs.
* XXX: we only avoid having the client libs on here b/c they're specified via
* full path and don't go through the loaders' locate routines.
* Not a big deal if they do end up on here: if they always did we could
* remove the linear walk in is_in_client_lib().
*/
/* called prior to instrument_init() */
init_client_aux_libs();
vmvector_add(client_aux_libs, start, end, NULL/*not an auxlib*/);
}
void
instrument_client_lib_unloaded(byte *start, byte *end)
{
/* called after instrument_exit() */
if (client_aux_libs != NULL)
vmvector_remove(client_aux_libs, start, end);
}
/**************************************************
* CLIENT AUXILIARY LIBRARIES
*/
DR_API
dr_auxlib_handle_t
dr_load_aux_library(const char *name,
byte **lib_start /*OPTIONAL OUT*/,
byte **lib_end /*OPTIONAL OUT*/)
{
byte *start, *end;
dr_auxlib_handle_t lib = load_shared_library(name, true/*reachable*/);
if (shared_library_bounds(lib, NULL, name, &start, &end)) {
/* be sure to replace b/c i#852 now adds during load w/ empty data */
vmvector_add_replace(client_aux_libs, start, end, (void*)lib);
if (lib_start != NULL)
*lib_start = start;
if (lib_end != NULL)
*lib_end = end;
all_memory_areas_lock();
update_all_memory_areas(start, end,
/* XXX: see comment in instrument_init()
* on walking the sections and what prot to use
*/
MEMPROT_READ, DR_MEMTYPE_IMAGE);
all_memory_areas_unlock();
} else {
unload_shared_library(lib);
lib = NULL;
}
return lib;
}
DR_API
dr_auxlib_routine_ptr_t
dr_lookup_aux_library_routine(dr_auxlib_handle_t lib, const char *name)
{
if (lib == NULL)
return NULL;
return lookup_library_routine(lib, name);
}
DR_API
bool
dr_unload_aux_library(dr_auxlib_handle_t lib)
{
byte *start = NULL, *end = NULL;
/* unfortunately on linux w/ dlopen we cannot find the bounds w/o
* either the path or an address so we iterate.
* once we have our private loader we shouldn't need this:
* XXX i#157
*/
vmvector_iterator_t vmvi;
dr_auxlib_handle_t found = NULL;
if (lib == NULL)
return false;
vmvector_iterator_start(client_aux_libs, &vmvi);
while (vmvector_iterator_hasnext(&vmvi)) {
found = (dr_auxlib_handle_t) vmvector_iterator_next(&vmvi, &start, &end);
if (found == lib)
break;
}
vmvector_iterator_stop(&vmvi);
if (found == lib) {
CLIENT_ASSERT(start != NULL && start < end, "logic error");
vmvector_remove(client_aux_libs, start, end);
unload_shared_library(lib);
all_memory_areas_lock();
update_all_memory_areas(start, end, MEMPROT_NONE, DR_MEMTYPE_FREE);
all_memory_areas_unlock();
return true;
} else {
CLIENT_ASSERT(false, "invalid aux lib");
return false;
}
}
#if defined(WINDOWS) && !defined(X64)
/* XXX i#1633: these routines all have 64-bit handle and routine types for
* handling win8's high ntdll64 in the future. For now the implementation
* treats them as 32-bit types and we do not support win8+.
*/
DR_API
dr_auxlib64_handle_t
dr_load_aux_x64_library(const char *name)
{
HANDLE h;
/* We use the x64 system loader. We assume that x64 state is fine being
* interrupted at arbitrary points during x86 execution, and that there
* is little risk of transparency violations.
*/
/* load_library_64() is racy. We don't expect anyone else to load
* x64 libs, but another thread in this client could, so we
* serialize here.
*/
mutex_lock(&client_aux_lib64_lock);
/* XXX: if we switch to our private loader we'll need to add custom
* search support to look in 64-bit system dir
*/
/* XXX: I'd add to the client_aux_libs vector, but w/ the system loader
* loading this I don't know all the dependent libs it might load.
* Not bothering for now.
*/
h = load_library_64(name);
mutex_unlock(&client_aux_lib64_lock);
return (dr_auxlib64_handle_t) h;
}
DR_API
dr_auxlib64_routine_ptr_t
dr_lookup_aux_x64_library_routine(dr_auxlib64_handle_t lib, const char *name)
{
uint64 res = get_proc_address_64((uint64)lib, name);
return (dr_auxlib64_routine_ptr_t) res;
}
DR_API
bool
dr_unload_aux_x64_library(dr_auxlib64_handle_t lib)
{
bool res;
mutex_lock(&client_aux_lib64_lock);
res = free_library_64((HANDLE)(uint)lib); /* uint cast to avoid cl warning */
mutex_unlock(&client_aux_lib64_lock);
return res;
}
#endif
/***************************************************************************
* LOCKS
*/
DR_API
/* Initializes a mutex
*/
void *
dr_mutex_create(void)
{
void *mutex = (void *)HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, mutex_t,
ACCT_CLIENT, UNPROTECTED);
ASSIGN_INIT_LOCK_FREE(*((mutex_t *) mutex), dr_client_mutex);
return mutex;
}
DR_API
/* Deletes mutex
*/
void
dr_mutex_destroy(void *mutex)
{
/* Delete mutex so locks_not_closed()==0 test in dynamo.c passes */
DELETE_LOCK(*((mutex_t *) mutex));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (mutex_t *)mutex, mutex_t, ACCT_CLIENT, UNPROTECTED);
}
DR_API
/* Locks mutex
*/
void
dr_mutex_lock(void *mutex)
{
dcontext_t *dcontext = get_thread_private_dcontext();
/* set client_grab_mutex so that we know to set client_thread_safe_for_synch
* around the actual wait for the lock */
if (IS_CLIENT_THREAD(dcontext)) {
dcontext->client_data->client_grab_mutex = mutex;
/* We do this on the outside so that we're conservative wrt races
* in the direction of not killing the thread while it has a lock
*/
dcontext->client_data->mutex_count++;
}
mutex_lock((mutex_t *) mutex);
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_grab_mutex = NULL;
}
DR_API
/* Unlocks mutex
*/
void
dr_mutex_unlock(void *mutex)
{
dcontext_t *dcontext = get_thread_private_dcontext();
mutex_unlock((mutex_t *) mutex);
/* We do this on the outside so that we're conservative wrt races
* in the direction of not killing the thread while it has a lock
*/
if (IS_CLIENT_THREAD(dcontext)) {
CLIENT_ASSERT(dcontext->client_data->mutex_count > 0,
"internal client mutex nesting error");
dcontext->client_data->mutex_count--;
}
}
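/* Illustrative usage sketch (hypothetical client code):
 *   void *my_lock = dr_mutex_create();      // e.g. at client init
 *   ...
 *   dr_mutex_lock(my_lock);
 *   ... critical section ...
 *   dr_mutex_unlock(my_lock);
 *   ...
 *   dr_mutex_destroy(my_lock);              // e.g. at client exit
 */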
DR_API
/* Tries once to grab the lock, returns whether or not successful
*/
bool
dr_mutex_trylock(void *mutex)
{
bool success = false;
dcontext_t *dcontext = get_thread_private_dcontext();
/* set client_grab_mutex so that we know to set client_thread_safe_for_synch
* around the actual wait for the lock */
if (IS_CLIENT_THREAD(dcontext)) {
dcontext->client_data->client_grab_mutex = mutex;
/* We do this on the outside so that we're conservative wrt races
* in the direction of not killing the thread while it has a lock
*/
dcontext->client_data->mutex_count++;
}
success = mutex_trylock((mutex_t *) mutex);
if (IS_CLIENT_THREAD(dcontext)) {
if (!success)
dcontext->client_data->mutex_count--;
dcontext->client_data->client_grab_mutex = NULL;
}
return success;
}
DR_API
bool
dr_mutex_self_owns(void *mutex)
{
return IF_DEBUG_ELSE(OWN_MUTEX((mutex_t *)mutex), true);
}
DR_API
bool
dr_mutex_mark_as_app(void *mutex)
{
mutex_t *lock = (mutex_t *) mutex;
mutex_mark_as_app(lock);
return true;
}
DR_API
void *
dr_rwlock_create(void)
{
void *rwlock = (void *) HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, read_write_lock_t,
ACCT_CLIENT, UNPROTECTED);
ASSIGN_INIT_READWRITE_LOCK_FREE(*((read_write_lock_t *)rwlock), dr_client_mutex);
return rwlock;
}
DR_API
void
dr_rwlock_destroy(void *rwlock)
{
DELETE_READWRITE_LOCK(*((read_write_lock_t *) rwlock));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (read_write_lock_t *)rwlock, read_write_lock_t,
ACCT_CLIENT, UNPROTECTED);
}
DR_API
void
dr_rwlock_read_lock(void *rwlock)
{
read_lock((read_write_lock_t *)rwlock);
}
DR_API
void
dr_rwlock_read_unlock(void *rwlock)
{
read_unlock((read_write_lock_t *)rwlock);
}
DR_API
void
dr_rwlock_write_lock(void *rwlock)
{
write_lock((read_write_lock_t *)rwlock);
}
DR_API
void
dr_rwlock_write_unlock(void *rwlock)
{
write_unlock((read_write_lock_t *)rwlock);
}
DR_API
bool
dr_rwlock_write_trylock(void *rwlock)
{
return write_trylock((read_write_lock_t *)rwlock);
}
DR_API
bool
dr_rwlock_self_owns_write_lock(void *rwlock)
{
return self_owns_write_lock((read_write_lock_t *)rwlock);
}
DR_API
bool
dr_rwlock_mark_as_app(void *rwlock)
{
read_write_lock_t *lock = (read_write_lock_t *) rwlock;
mutex_mark_as_app(&lock->lock);
return true;
}
DR_API
void *
dr_recurlock_create(void)
{
void *reclock = (void *) HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, recursive_lock_t,
ACCT_CLIENT, UNPROTECTED);
ASSIGN_INIT_RECURSIVE_LOCK_FREE(*((recursive_lock_t *)reclock), dr_client_mutex);
return reclock;
}
DR_API
void
dr_recurlock_destroy(void *reclock)
{
DELETE_RECURSIVE_LOCK(*((recursive_lock_t *) reclock));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (recursive_lock_t *)reclock, recursive_lock_t,
ACCT_CLIENT, UNPROTECTED);
}
DR_API
void
dr_recurlock_lock(void *reclock)
{
acquire_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
void
dr_app_recurlock_lock(void *reclock, dr_mcontext_t *mc)
{
CLIENT_ASSERT(mc->flags == DR_MC_ALL,
"mcontext must be for DR_MC_ALL");
acquire_recursive_app_lock((recursive_lock_t *)reclock,
dr_mcontext_as_priv_mcontext(mc));
}
DR_API
void
dr_recurlock_unlock(void *reclock)
{
release_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
bool
dr_recurlock_trylock(void *reclock)
{
return try_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
bool
dr_recurlock_self_owns(void *reclock)
{
return self_owns_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
bool
dr_recurlock_mark_as_app(void *reclock)
{
recursive_lock_t *lock = (recursive_lock_t *) reclock;
mutex_mark_as_app(&lock->lock);
return true;
}
DR_API
void *
dr_event_create(void)
{
return (void *)create_event();
}
DR_API
bool
dr_event_destroy(void *event)
{
destroy_event((event_t)event);
return true;
}
DR_API
bool
dr_event_wait(void *event)
{
dcontext_t *dcontext = get_thread_private_dcontext();
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = true;
wait_for_event((event_t)event, 0);
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = false;
return true;
}
DR_API
bool
dr_event_signal(void *event)
{
signal_event((event_t)event);
return true;
}
DR_API
bool
dr_event_reset(void *event)
{
reset_event((event_t)event);
return true;
}
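/* Illustrative usage sketch (hypothetical client code) for synchronizing with a
 * client thread:
 *   void *my_event = dr_event_create();
 *   // waiter:
 *   dr_event_wait(my_event);        // blocks until another thread signals
 *   // signaler:
 *   dr_event_signal(my_event);
 *   // when re-arming for another round, and at teardown:
 *   dr_event_reset(my_event);
 *   dr_event_destroy(my_event);
 */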
DR_API
bool
dr_mark_safe_to_suspend(void *drcontext, bool enter)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
ASSERT_OWN_NO_LOCKS();
/* We need to return so we can't call check_wait_at_safe_spot().
     * We don't set mcontext b/c no one should examine it.
*/
if (enter)
set_synch_state(dcontext, THREAD_SYNCH_NO_LOCKS_NO_XFER);
else
set_synch_state(dcontext, THREAD_SYNCH_NONE);
return true;
}
DR_API
int
dr_atomic_add32_return_sum(volatile int *x, int val)
{
return atomic_add_exchange_int(x, val);
}
/***************************************************************************
* MODULES
*/
DR_API
/* Looks up the module data containing pc. Returns NULL if not found.
* Returned module_data_t must be freed with dr_free_module_data(). */
module_data_t *
dr_lookup_module(byte *pc)
{
module_area_t *area;
module_data_t *client_data;
os_get_module_info_lock();
area = module_pc_lookup(pc);
client_data = copy_module_area_to_module_data(area);
os_get_module_info_unlock();
return client_data;
}
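/* Illustrative caller sketch (hypothetical client code):
 *   module_data_t *mod = dr_lookup_module(target_pc);
 *   if (mod != NULL) {
 *       dr_fprintf(STDERR, "pc is in %s\n", mod->full_path);
 *       dr_free_module_data(mod);
 *   }
 */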
DR_API
module_data_t *
dr_get_main_module(void)
{
return dr_lookup_module(get_image_entry());
}
DR_API
/* Looks up the module with name matching name (ignoring case). Returns NULL if not
* found. Returned module_data_t must be freed with dr_free_module_data(). */
module_data_t *
dr_lookup_module_by_name(const char *name)
{
/* We have no quick way of doing this since our module list is indexed by pc. We
* could use get_module_handle() but that's dangerous to call at arbitrary times,
* so we just walk our full list here. */
module_iterator_t *mi = module_iterator_start();
    CLIENT_ASSERT((name != NULL), "dr_lookup_module_by_name: null name");
while (module_iterator_hasnext(mi)) {
module_area_t *area = module_iterator_next(mi);
module_data_t *client_data;
const char *modname = GET_MODULE_NAME(&area->names);
if (modname != NULL && strcasecmp(modname, name) == 0) {
client_data = copy_module_area_to_module_data(area);
module_iterator_stop(mi);
return client_data;
}
}
module_iterator_stop(mi);
return NULL;
}
typedef struct _client_mod_iterator_list_t {
module_data_t *info;
struct _client_mod_iterator_list_t *next;
} client_mod_iterator_list_t;
typedef struct {
client_mod_iterator_list_t *current;
client_mod_iterator_list_t *full_list;
} client_mod_iterator_t;
DR_API
/* Initialize a new client module iterator. */
dr_module_iterator_t *
dr_module_iterator_start(void)
{
client_mod_iterator_t *client_iterator = (client_mod_iterator_t *)
HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, client_mod_iterator_t, ACCT_CLIENT, UNPROTECTED);
module_iterator_t *dr_iterator = module_iterator_start();
memset(client_iterator, 0, sizeof(*client_iterator));
while (module_iterator_hasnext(dr_iterator)) {
module_area_t *area = module_iterator_next(dr_iterator);
client_mod_iterator_list_t *list = (client_mod_iterator_list_t *)
HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, client_mod_iterator_list_t, ACCT_CLIENT,
UNPROTECTED);
ASSERT(area != NULL);
list->info = copy_module_area_to_module_data(area);
list->next = NULL;
if (client_iterator->current == NULL) {
client_iterator->current = list;
client_iterator->full_list = client_iterator->current;
} else {
client_iterator->current->next = list;
client_iterator->current = client_iterator->current->next;
}
}
module_iterator_stop(dr_iterator);
client_iterator->current = client_iterator->full_list;
return (dr_module_iterator_t)client_iterator;
}
DR_API
/* Returns true if there is another loaded module in the iterator. */
bool
dr_module_iterator_hasnext(dr_module_iterator_t *mi)
{
CLIENT_ASSERT((mi != NULL), "dr_module_iterator_hasnext: null iterator");
return ((client_mod_iterator_t *)mi)->current != NULL;
}
DR_API
/* Retrieves the module_data_t for the next loaded module in the iterator. */
module_data_t *
dr_module_iterator_next(dr_module_iterator_t *mi)
{
module_data_t *data;
client_mod_iterator_t *ci = (client_mod_iterator_t *)mi;
CLIENT_ASSERT((mi != NULL), "dr_module_iterator_next: null iterator");
CLIENT_ASSERT((ci->current != NULL), "dr_module_iterator_next: has no next, use "
"dr_module_iterator_hasnext() first");
if (ci->current == NULL)
return NULL;
data = ci->current->info;
ci->current = ci->current->next;
return data;
}
DR_API
/* Free the module iterator. */
void
dr_module_iterator_stop(dr_module_iterator_t *mi)
{
client_mod_iterator_t *ci = (client_mod_iterator_t *)mi;
CLIENT_ASSERT((mi != NULL), "dr_module_iterator_stop: null iterator");
/* free module_data_t's we didn't give to the client */
while (ci->current != NULL) {
dr_free_module_data(ci->current->info);
ci->current = ci->current->next;
}
ci->current = ci->full_list;
while (ci->current != NULL) {
client_mod_iterator_list_t *next = ci->current->next;
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ci->current, client_mod_iterator_list_t,
ACCT_CLIENT, UNPROTECTED);
ci->current = next;
}
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ci, client_mod_iterator_t, ACCT_CLIENT, UNPROTECTED);
}
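/* Illustrative iteration sketch (hypothetical client code); each module_data_t
 * handed out by dr_module_iterator_next() is owned by the caller and must be
 * freed with dr_free_module_data():
 *   dr_module_iterator_t *mi = dr_module_iterator_start();
 *   while (dr_module_iterator_hasnext(mi)) {
 *       module_data_t *mod = dr_module_iterator_next(mi);
 *       dr_fprintf(STDERR, "module %s\n", dr_module_preferred_name(mod));
 *       dr_free_module_data(mod);
 *   }
 *   dr_module_iterator_stop(mi);
 */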
DR_API
/* Get the name dr uses for this module. */
const char *
dr_module_preferred_name(const module_data_t *data)
{
if (data == NULL)
return NULL;
return GET_MODULE_NAME(&data->names);
}
#ifdef WINDOWS
DR_API
/* If pc is within a section of module lib, returns true and (optionally) a copy of
 * the IMAGE_SECTION_HEADER in section_out. If pc is not within a section of
 * module lib, returns false. */
bool
dr_lookup_module_section(module_handle_t lib, byte *pc, IMAGE_SECTION_HEADER *section_out)
{
CLIENT_ASSERT((lib != NULL), "dr_lookup_module_section: null module_handle_t");
return module_pc_section_lookup((app_pc)lib, pc, section_out);
}
#endif
/* i#805: Instead of exposing multiple instruction levels, we expose a way for
* clients to turn off instrumentation. Then DR can avoid a full decode and we
* can save some time on modules that are not interesting.
* XXX: This breaks other clients and extensions, in particular drwrap, which
* can miss call and return sites in the uninstrumented module.
*/
DR_API
bool
dr_module_set_should_instrument(module_handle_t handle, bool should_instrument)
{
module_area_t *ma;
DEBUG_DECLARE(dcontext_t *dcontext = get_thread_private_dcontext());
IF_DEBUG(executable_areas_lock());
os_get_module_info_write_lock();
ma = module_pc_lookup((byte*)handle);
if (ma != NULL) {
/* This kind of obviates the need for handle, but it makes the API more
* explicit.
*/
CLIENT_ASSERT(dcontext->client_data->no_delete_mod_data->handle == handle,
"Do not call dr_module_set_should_instrument() outside "
"of the module's own load event");
ASSERT(!executable_vm_area_executed_from(ma->start, ma->end));
if (should_instrument) {
ma->flags &= ~MODULE_NULL_INSTRUMENT;
} else {
ma->flags |= MODULE_NULL_INSTRUMENT;
}
}
os_get_module_info_write_unlock();
IF_DEBUG(executable_areas_unlock());
return (ma != NULL);
}
DR_API
bool
dr_module_should_instrument(module_handle_t handle)
{
bool should_instrument = true;
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup((byte*)handle);
CLIENT_ASSERT(ma != NULL, "invalid module handle");
if (ma != NULL) {
should_instrument = !TEST(MODULE_NULL_INSTRUMENT, ma->flags);
}
os_get_module_info_unlock();
return should_instrument;
}
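/* A minimal usage sketch: a client that turns instrumentation off for a module
 * it does not care about, from its module load event (the only place
 * dr_module_set_should_instrument() may be called). The module name is purely
 * illustrative; strcmp is the usual libc routine:
 *
 *   static void
 *   event_module_load(void *drcontext, const module_data_t *info, bool loaded)
 *   {
 *       const char *name = dr_module_preferred_name(info);
 *       if (name != NULL && strcmp(name, "libuninteresting.so") == 0)
 *           dr_module_set_should_instrument(info->handle, false);
 *   }
 */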
DR_API
/* Returns the entry point of the function with the given name in the module
* with the given handle.
 * We take a module_handle_t rather than a module_data_t so the client can
 * iterate or look up the module_data_t, store just the single-field
 * handle, and free the data right away; besides, module_data_t
 * is not an opaque type.
*/
generic_func_t
dr_get_proc_address(module_handle_t lib, const char *name)
{
#ifdef WINDOWS
return get_proc_address_resolve_forward(lib, name);
#else
return get_proc_address(lib, name);
#endif
}
DR_API
bool
dr_get_proc_address_ex(module_handle_t lib, const char *name,
dr_export_info_t *info OUT, size_t info_len)
{
/* If we add new fields we'll check various values of info_len */
if (info == NULL || info_len < sizeof(*info))
return false;
#ifdef WINDOWS
info->address = get_proc_address_resolve_forward(lib, name);
info->is_indirect_code = false;
#else
info->address = get_proc_address_ex(lib, name, &info->is_indirect_code);
#endif
return (info->address != NULL);
}
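/* A minimal usage sketch, assuming "mod" came from dr_lookup_module_by_name()
 * or the module iterator; is_indirect_code flags exports whose address is an
 * indirect code object (e.g., an ELF ifunc) rather than the final code:
 *
 *   dr_export_info_t info;
 *   if (dr_get_proc_address_ex(mod->handle, "malloc", &info, sizeof(info))) {
 *       dr_printf("malloc export at "PFX" (indirect: %d)\n",
 *                 (void *)info.address, info.is_indirect_code);
 *   }
 */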
byte *
dr_map_executable_file(const char *filename, dr_map_executable_flags_t flags,
size_t *size OUT)
{
#ifdef MACOS
/* XXX i#1285: implement private loader on Mac */
return NULL;
#else
modload_flags_t mflags = MODLOAD_NOT_PRIVLIB;
if (TEST(DR_MAPEXE_SKIP_WRITABLE, flags))
mflags |= MODLOAD_SKIP_WRITABLE;
if (filename == NULL)
return NULL;
return privload_map_and_relocate(filename, size, mflags);
#endif
}
bool
dr_unmap_executable_file(byte *base, size_t size)
{
return unmap_file(base, size);
}
DR_API
/* Creates a new directory. Fails if the directory already exists
* or if it can't be created.
*/
bool
dr_create_dir(const char *fname)
{
return os_create_dir(fname, CREATE_DIR_REQUIRE_NEW);
}
DR_API
bool
dr_delete_dir(const char *fname)
{
return os_delete_dir(fname);
}
DR_API
bool
dr_get_current_directory(char *buf, size_t bufsz)
{
return os_get_current_dir(buf, bufsz);
}
DR_API
/* Checks existence of a directory. */
bool
dr_directory_exists(const char *fname)
{
return os_file_exists(fname, true);
}
DR_API
/* Checks for the existence of a file. */
bool
dr_file_exists(const char *fname)
{
return os_file_exists(fname, false);
}
DR_API
/* Opens a file in the mode specified by mode_flags.
* Returns INVALID_FILE if unsuccessful
*/
file_t
dr_open_file(const char *fname, uint mode_flags)
{
uint flags = 0;
if (TEST(DR_FILE_WRITE_REQUIRE_NEW, mode_flags)) {
flags |= OS_OPEN_WRITE | OS_OPEN_REQUIRE_NEW;
}
if (TEST(DR_FILE_WRITE_APPEND, mode_flags)) {
CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected");
flags |= OS_OPEN_WRITE | OS_OPEN_APPEND;
}
if (TEST(DR_FILE_WRITE_OVERWRITE, mode_flags)) {
CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected");
flags |= OS_OPEN_WRITE;
}
if (TEST(DR_FILE_WRITE_ONLY, mode_flags)) {
CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected");
flags |= OS_OPEN_WRITE_ONLY;
}
if (TEST(DR_FILE_READ, mode_flags))
flags |= OS_OPEN_READ;
CLIENT_ASSERT((flags != 0), "dr_open_file: no mode selected");
if (TEST(DR_FILE_ALLOW_LARGE, mode_flags))
flags |= OS_OPEN_ALLOW_LARGE;
if (TEST(DR_FILE_CLOSE_ON_FORK, mode_flags))
flags |= OS_OPEN_CLOSE_ON_FORK;
/* all client-opened files are protected */
return os_open_protected(fname, flags);
}
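/* A minimal usage sketch: create (or truncate) an output file, write a line,
 * and close it. The file name is illustrative only:
 *
 *   file_t f = dr_open_file("client.log", DR_FILE_WRITE_OVERWRITE);
 *   if (f != INVALID_FILE) {
 *       dr_fprintf(f, "hello from the client\n");
 *       dr_flush_file(f);
 *       dr_close_file(f);
 *   }
 */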
DR_API
/* Closes file f
*/
void
dr_close_file(file_t f)
{
/* all client-opened files are protected */
os_close_protected(f);
}
DR_API
/* Renames the file src to dst. */
bool
dr_rename_file(const char *src, const char *dst, bool replace)
{
return os_rename_file(src, dst, replace);
}
DR_API
/* Deletes a file. */
bool
dr_delete_file(const char *filename)
{
/* os_delete_mapped_file should be a superset of os_delete_file, so we use
* it.
*/
return os_delete_mapped_file(filename);
}
DR_API
/* Flushes any buffers for file f
*/
void
dr_flush_file(file_t f)
{
os_flush(f);
}
DR_API
/* Writes count bytes from buf to f.
* Returns the actual number written.
*/
ssize_t
dr_write_file(file_t f, const void *buf, size_t count)
{
#ifdef WINDOWS
if ((f == STDOUT || f == STDERR) && print_to_console)
return dr_write_to_console_varg(f == STDOUT, "%.*s", count, buf);
else
#endif
return os_write(f, buf, count);
}
DR_API
/* Reads up to count bytes from f into buf.
* Returns the actual number read.
*/
ssize_t
dr_read_file(file_t f, void *buf, size_t count)
{
return os_read(f, buf, count);
}
DR_API
/* sets the current file position for file f to offset bytes from the specified origin
* returns true if successful */
bool
dr_file_seek(file_t f, int64 offset, int origin)
{
CLIENT_ASSERT(origin == DR_SEEK_SET || origin == DR_SEEK_CUR || origin == DR_SEEK_END,
"dr_file_seek: invalid origin value");
return os_seek(f, offset, origin);
}
DR_API
/* gets the current file position for file f in bytes from start of file */
int64
dr_file_tell(file_t f)
{
return os_tell(f);
}
DR_API
file_t
dr_dup_file_handle(file_t f)
{
#ifdef UNIX
/* returns -1 on failure == INVALID_FILE */
return dup_syscall(f);
#else
HANDLE ht = INVALID_HANDLE_VALUE;
NTSTATUS res = duplicate_handle(NT_CURRENT_PROCESS, f, NT_CURRENT_PROCESS,
&ht, SYNCHRONIZE, 0,
DUPLICATE_SAME_ACCESS|DUPLICATE_SAME_ATTRIBUTES);
if (!NT_SUCCESS(res))
return INVALID_FILE;
else
return ht;
#endif
}
DR_API
bool
dr_file_size(file_t fd, OUT uint64 *size)
{
return os_get_file_size_by_handle(fd, size);
}
DR_API
void *
dr_map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
uint flags)
{
return (void *)
map_file(f, size, offs, addr, prot,
(TEST(DR_MAP_PRIVATE, flags) ? MAP_FILE_COPY_ON_WRITE : 0) |
IF_WINDOWS((TEST(DR_MAP_IMAGE, flags) ? MAP_FILE_IMAGE : 0) |)
IF_UNIX((TEST(DR_MAP_FIXED, flags) ? MAP_FILE_FIXED : 0) |)
(TEST(DR_MAP_CACHE_REACHABLE, flags) ? MAP_FILE_REACHABLE : 0));
}
DR_API
bool
dr_unmap_file(void *map, size_t size)
{
dr_mem_info_t info;
CLIENT_ASSERT(ALIGNED(map, PAGE_SIZE),
"dr_unmap_file: map is not page aligned");
if (!dr_query_memory_ex(map, &info) /* fail to query */ ||
info.type == DR_MEMTYPE_FREE /* not mapped file */) {
CLIENT_ASSERT(false, "dr_unmap_file: incorrect file map");
return false;
}
#ifdef WINDOWS
/* On Windows, the whole file will be unmapped instead, so we adjust
* the bound to make sure vm_areas are updated correctly.
*/
map = info.base_pc;
if (info.type == DR_MEMTYPE_IMAGE) {
size = get_allocation_size(map, NULL);
} else
size = info.size;
#endif
return unmap_file((byte *) map, size);
}
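/* A minimal usage sketch pairing dr_map_file() with dr_unmap_file(). The size
 * parameter is in/out, so the value dr_map_file() stores back is the one to
 * pass to dr_unmap_file(). The file name is illustrative only:
 *
 *   uint64 fsz;
 *   file_t f = dr_open_file("data.bin", DR_FILE_READ);
 *   if (f != INVALID_FILE && dr_file_size(f, &fsz)) {
 *       size_t map_sz = (size_t)fsz;
 *       void *map = dr_map_file(f, &map_sz, 0, NULL, DR_MEMPROT_READ, 0);
 *       if (map != NULL) {
 *           ... read the mapped bytes ...
 *           dr_unmap_file(map, map_sz);
 *       }
 *       dr_close_file(f);
 *   }
 */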
DR_API
void
dr_log(void *drcontext, uint mask, uint level, const char *fmt, ...)
{
#ifdef DEBUG
dcontext_t *dcontext = (dcontext_t *) drcontext;
va_list ap;
if (stats != NULL &&
((stats->logmask & mask) == 0 ||
stats->loglevel < level))
return;
va_start(ap, fmt);
if (dcontext != NULL)
do_file_write(dcontext->logfile, fmt, ap);
else
do_file_write(main_logfile, fmt, ap);
va_end(ap);
#else
return; /* no logging if not debug */
#endif
}
DR_API
/* Returns the log file for the drcontext thread.
* If drcontext is NULL, returns the main log file.
*/
file_t
dr_get_logfile(void *drcontext)
{
#ifdef DEBUG
dcontext_t *dcontext = (dcontext_t *) drcontext;
if (dcontext != NULL)
return dcontext->logfile;
else
return main_logfile;
#else
return INVALID_FILE;
#endif
}
DR_API
/* Returns true iff the -stderr_mask runtime option is non-zero, indicating
* that the user wants notification messages printed to stderr.
*/
bool
dr_is_notify_on(void)
{
return (dynamo_options.stderr_mask != 0);
}
#ifdef WINDOWS
DR_API file_t
dr_get_stdout_file(void)
{
return get_stdout_handle();
}
DR_API file_t
dr_get_stderr_file(void)
{
return get_stderr_handle();
}
DR_API file_t
dr_get_stdin_file(void)
{
return get_stdin_handle();
}
#endif
#ifdef PROGRAM_SHEPHERDING
DR_API void
dr_write_forensics_report(void *dcontext, file_t file,
dr_security_violation_type_t violation,
dr_security_violation_action_t action,
const char *violation_name)
{
security_violation_t sec_violation;
action_type_t sec_action;
switch (violation) {
case DR_RCO_STACK_VIOLATION:
sec_violation = STACK_EXECUTION_VIOLATION;
break;
case DR_RCO_HEAP_VIOLATION:
sec_violation = HEAP_EXECUTION_VIOLATION;
break;
case DR_RCT_RETURN_VIOLATION:
sec_violation = RETURN_TARGET_VIOLATION;
break;
case DR_RCT_INDIRECT_CALL_VIOLATION:
sec_violation = INDIRECT_CALL_RCT_VIOLATION;
break;
case DR_RCT_INDIRECT_JUMP_VIOLATION:
sec_violation = INDIRECT_JUMP_RCT_VIOLATION;
break;
default:
CLIENT_ASSERT(false, "dr_write_forensics_report does not support "
"DR_UNKNOWN_VIOLATION or invalid violation types");
return;
}
switch (action) {
case DR_VIOLATION_ACTION_KILL_PROCESS:
sec_action = ACTION_TERMINATE_PROCESS;
break;
case DR_VIOLATION_ACTION_CONTINUE:
case DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT:
sec_action = ACTION_CONTINUE;
break;
case DR_VIOLATION_ACTION_KILL_THREAD:
sec_action = ACTION_TERMINATE_THREAD;
break;
case DR_VIOLATION_ACTION_THROW_EXCEPTION:
sec_action = ACTION_THROW_EXCEPTION;
break;
default:
CLIENT_ASSERT(false, "dr_write_forensics_report invalid action selection");
return;
}
/* FIXME - could use a better message. */
append_diagnostics(file, action_message[sec_action], violation_name, sec_violation);
}
#endif /* PROGRAM_SHEPHERDING */
#ifdef WINDOWS
DR_API void
dr_messagebox(const char *fmt, ...)
{
dcontext_t *dcontext = get_thread_private_dcontext();
char msg[MAX_LOG_LENGTH];
wchar_t wmsg[MAX_LOG_LENGTH];
va_list ap;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
va_start(ap, fmt);
vsnprintf(msg, BUFFER_SIZE_ELEMENTS(msg), fmt, ap);
NULL_TERMINATE_BUFFER(msg);
snwprintf(wmsg, BUFFER_SIZE_ELEMENTS(wmsg), L"%S", msg);
NULL_TERMINATE_BUFFER(wmsg);
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = true;
nt_messagebox(wmsg, debugbox_get_title());
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = false;
va_end(ap);
}
static ssize_t
dr_write_to_console(bool to_stdout, const char *fmt, va_list ap)
{
bool res = true;
char msg[MAX_LOG_LENGTH];
uint written = 0;
int len;
HANDLE std;
CLIENT_ASSERT(dr_using_console(), "internal logic error");
ASSERT(priv_kernel32 != NULL &&
kernel32_WriteFile != NULL);
/* kernel32!GetStdHandle(STD_OUTPUT_HANDLE) == our PEB-based get_stdout_handle */
std = (to_stdout ? get_stdout_handle() : get_stderr_handle());
if (std == INVALID_HANDLE_VALUE)
return false;
len = vsnprintf(msg, BUFFER_SIZE_ELEMENTS(msg), fmt, ap);
/* Let user know if message was truncated */
if (len < 0 || len == BUFFER_SIZE_ELEMENTS(msg))
res = false;
NULL_TERMINATE_BUFFER(msg);
/* Make this routine work in all kinds of windows by going through
* kernel32!WriteFile, which will call WriteConsole for us.
*/
res = res &&
kernel32_WriteFile(std, msg, (DWORD) strlen(msg), (LPDWORD) &written, NULL);
return (res ? written : 0);
}
static ssize_t
dr_write_to_console_varg(bool to_stdout, const char *fmt, ...)
{
va_list ap;
ssize_t res;
va_start(ap, fmt);
res = dr_write_to_console(to_stdout, fmt, ap);
va_end(ap);
return res;
}
DR_API
bool
dr_using_console(void)
{
bool res;
if (get_os_version() >= WINDOWS_VERSION_8) {
FILE_FS_DEVICE_INFORMATION device_info;
HANDLE herr = get_stderr_handle();
/* The handle is invalid iff it's a gui app and the parent is a console */
if (herr == INVALID_HANDLE_VALUE) {
module_data_t *app_kernel32 = dr_lookup_module_by_name("kernel32.dll");
if (privload_attach_parent_console(app_kernel32->start) == false) {
dr_free_module_data(app_kernel32);
return false;
}
dr_free_module_data(app_kernel32);
herr = get_stderr_handle();
}
if (nt_query_volume_info(herr, &device_info, sizeof(device_info),
FileFsDeviceInformation) == STATUS_SUCCESS) {
if (device_info.DeviceType == FILE_DEVICE_CONSOLE)
return true;
}
return false;
}
/* We detect cmd window using what kernel32!WriteFile uses: a handle
* having certain bits set.
*/
res = (((ptr_int_t)get_stderr_handle() & 0x10000003) == 0x3);
CLIENT_ASSERT(!res || get_os_version() < WINDOWS_VERSION_8,
"Please report this: Windows 8 does have old-style consoles!");
return res;
}
DR_API
bool
dr_enable_console_printing(void)
{
bool success = false;
    /* b/c the private loader sets up its context-switch code up front based on
     * whether we have windows priv libs or not, this can only be called during
     * client init()
*/
if (dynamo_initialized) {
CLIENT_ASSERT(false, "dr_enable_console_printing() must be called during init");
return false;
}
/* Direct writes to std handles work on win8+ (xref i#911) but we don't need
* a separate check as the handle is detected as a non-console handle.
*/
if (!dr_using_console())
return true;
if (!INTERNAL_OPTION(private_loader))
return false;
if (!print_to_console) {
if (priv_kernel32 == NULL) {
/* Not using load_shared_library() b/c it won't search paths
* for us. XXX: should add os-shared interface for
* locate-and-load.
*/
priv_kernel32 = (shlib_handle_t)
locate_and_load_private_library("kernel32.dll", false/*!reachable*/);
}
if (priv_kernel32 != NULL && kernel32_WriteFile == NULL) {
module_data_t *app_kernel32 = dr_lookup_module_by_name("kernel32.dll");
kernel32_WriteFile = (kernel32_WriteFile_t)
lookup_library_routine(priv_kernel32, "WriteFile");
            /* There is some problem loading a 32-bit kernel32.dll
             * when a 64-bit kernel32.dll is already loaded. If the app's kernel32
             * is not loaded we can't call privload_console_share, because it
             * assumes kernel32 is loaded.
*/
if (app_kernel32 == NULL) {
success = false;
} else {
success = privload_console_share(priv_kernel32, app_kernel32->start);
dr_free_module_data(app_kernel32);
}
}
/* We go ahead and cache whether dr_using_console(). If app really
* changes its console, client could call this routine again
* as a workaround. Seems unlikely: better to have better perf.
*/
print_to_console = (priv_kernel32 != NULL &&
kernel32_WriteFile != NULL && success);
}
return print_to_console;
}
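/* A minimal usage sketch: a client that may run under cmd should request
 * console printing during its initialization, before any output:
 *
 *   if (dr_using_console())
 *       dr_enable_console_printing();
 *   dr_printf("client initialized\n");
 */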
#endif /* WINDOWS */
DR_API void
dr_printf(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
#ifdef WINDOWS
if (print_to_console)
dr_write_to_console(true/*stdout*/, fmt, ap);
else
#endif
do_file_write(STDOUT, fmt, ap);
va_end(ap);
}
DR_API ssize_t
dr_vfprintf(file_t f, const char *fmt, va_list ap)
{
ssize_t written;
#ifdef WINDOWS
if ((f == STDOUT || f == STDERR) && print_to_console) {
written = dr_write_to_console(f == STDOUT, fmt, ap);
if (written <= 0)
written = -1;
} else
#endif
written = do_file_write(f, fmt, ap);
return written;
}
DR_API ssize_t
dr_fprintf(file_t f, const char *fmt, ...)
{
va_list ap;
ssize_t res;
va_start(ap, fmt);
res = dr_vfprintf(f, fmt, ap);
va_end(ap);
return res;
}
DR_API int
dr_snprintf(char *buf, size_t max, const char *fmt, ...)
{
int res;
va_list ap;
va_start(ap, fmt);
/* PR 219380: we use our_vsnprintf instead of ntdll._vsnprintf b/c the
* latter does not support floating point.
     * Plus, our_vsnprintf returns -1 for > max chars (matching Windows
     * behavior, unlike the Linux libc version).
*/
res = our_vsnprintf(buf, max, fmt, ap);
va_end(ap);
return res;
}
DR_API int
dr_vsnprintf(char *buf, size_t max, const char *fmt, va_list ap)
{
return our_vsnprintf(buf, max, fmt, ap);
}
DR_API int
dr_snwprintf(wchar_t *buf, size_t max, const wchar_t *fmt, ...)
{
int res;
va_list ap;
va_start(ap, fmt);
res = our_vsnprintf_wide(buf, max, fmt, ap);
va_end(ap);
return res;
}
DR_API int
dr_vsnwprintf(wchar_t *buf, size_t max, const wchar_t *fmt, va_list ap)
{
return our_vsnprintf_wide(buf, max, fmt, ap);
}
DR_API int
dr_sscanf(const char *str, const char *fmt, ...)
{
int res;
va_list ap;
va_start(ap, fmt);
res = our_vsscanf(str, fmt, ap);
va_end(ap);
return res;
}
DR_API const char *
dr_get_token(const char *str, char *buf, size_t buflen)
{
/* We don't indicate whether any truncation happened. The
* reasoning is that this is meant to be used on a string of known
* size ahead of time, so the max size for any one token is known.
*/
const char *pos = str;
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(buflen), "buflen too large");
if (parse_word(str, &pos, buf, (uint)buflen) == NULL)
return NULL;
else
return pos;
}
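/* A minimal usage sketch: walk the whitespace-separated tokens of an option
 * string (the string itself is illustrative); a NULL return means no token
 * remained:
 *
 *   char tok[64];
 *   const char *rest = "-opt1 -opt2 value";
 *   while ((rest = dr_get_token(rest, tok, sizeof(tok))) != NULL)
 *       dr_printf("token: %s\n", tok);
 */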
DR_API void
dr_print_instr(void *drcontext, file_t f, instr_t *instr, const char *msg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_print_instr: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT || standalone_library,
"dr_print_instr: drcontext is invalid");
dr_fprintf(f, "%s "PFX" ", msg, instr_get_translation(instr));
instr_disassemble(dcontext, instr, f);
dr_fprintf(f, "\n");
}
DR_API void
dr_print_opnd(void *drcontext, file_t f, opnd_t opnd, const char *msg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_print_opnd: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT || standalone_library,
"dr_print_opnd: drcontext is invalid");
dr_fprintf(f, "%s ", msg);
opnd_disassemble(dcontext, opnd, f);
dr_fprintf(f, "\n");
}
/***************************************************************************
* Thread support
*/
DR_API
/* Returns the DR context of the current thread */
void *
dr_get_current_drcontext(void)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
return (void *) dcontext;
}
DR_API thread_id_t
dr_get_thread_id(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_get_thread_id: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_get_thread_id: drcontext is invalid");
return dcontext->owning_thread;
}
#ifdef WINDOWS
/* Added for DrMem i#1254 */
DR_API HANDLE
dr_get_dr_thread_handle(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_get_thread_id: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_get_thread_id: drcontext is invalid");
return dcontext->thread_record->handle;
}
#endif
DR_API void *
dr_get_tls_field(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_get_tls_field: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_get_tls_field: drcontext is invalid");
return dcontext->client_data->user_field;
}
DR_API void
dr_set_tls_field(void *drcontext, void *value)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_set_tls_field: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_set_tls_field: drcontext is invalid");
dcontext->client_data->user_field = value;
}
DR_API void *
dr_get_dr_segment_base(IN reg_id_t seg)
{
#ifdef AARCHXX
if (seg == dr_reg_stolen)
return os_get_dr_tls_base(get_thread_private_dcontext());
else
return NULL;
#else
return get_segment_base(seg);
#endif
}
DR_API
bool
dr_raw_tls_calloc(OUT reg_id_t *tls_register,
OUT uint *offset,
IN uint num_slots,
IN uint alignment)
{
CLIENT_ASSERT(tls_register != NULL,
"dr_raw_tls_calloc: tls_register cannot be NULL");
CLIENT_ASSERT(offset != NULL,
"dr_raw_tls_calloc: offset cannot be NULL");
*tls_register = IF_X86_ELSE(SEG_TLS, dr_reg_stolen);
if (num_slots == 0)
return true;
return os_tls_calloc(offset, num_slots, alignment);
}
DR_API
bool
dr_raw_tls_cfree(uint offset, uint num_slots)
{
if (num_slots == 0)
return true;
return os_tls_cfree(offset, num_slots);
}
DR_API
opnd_t
dr_raw_tls_opnd(void *drcontext, reg_id_t tls_register, uint tls_offs)
{
CLIENT_ASSERT(drcontext != NULL, "dr_raw_tls_opnd: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_raw_tls_opnd: drcontext is invalid");
IF_X86_ELSE({
return opnd_create_far_base_disp_ex(tls_register, DR_REG_NULL, DR_REG_NULL,
0, tls_offs, OPSZ_PTR,
/* modern processors don't want addr16
* prefixes
*/
false, true, false);
}, {
return OPND_CREATE_MEMPTR(tls_register, tls_offs);
});
}
DR_API
void
dr_insert_read_raw_tls(void *drcontext, instrlist_t *ilist, instr_t *where,
reg_id_t tls_register, uint tls_offs, reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_read_raw_tls: drcontext cannot be NULL");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"must use a pointer-sized general-purpose register");
IF_X86_ELSE({
MINSERT(ilist, where, INSTR_CREATE_mov_ld
(dcontext, opnd_create_reg(reg),
dr_raw_tls_opnd(drcontext, tls_register, tls_offs)));
}, {
MINSERT(ilist, where, XINST_CREATE_load
(dcontext, opnd_create_reg(reg),
dr_raw_tls_opnd(drcontext, tls_register, tls_offs)));
});
}
DR_API
void
dr_insert_write_raw_tls(void *drcontext, instrlist_t *ilist, instr_t *where,
reg_id_t tls_register, uint tls_offs, reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_write_raw_tls: drcontext cannot be NULL");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"must use a pointer-sized general-purpose register");
IF_X86_ELSE({
MINSERT(ilist, where, INSTR_CREATE_mov_st
(dcontext,
dr_raw_tls_opnd(drcontext, tls_register, tls_offs),
opnd_create_reg(reg)));
}, {
MINSERT(ilist, where, XINST_CREATE_store
(dcontext, dr_raw_tls_opnd(drcontext, tls_register, tls_offs),
opnd_create_reg(reg)));
});
}
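/* A minimal usage sketch for the raw TLS routines above; the slot count,
 * alignment, and register names are illustrative:
 *
 *   static reg_id_t tls_seg;
 *   static uint tls_offs;
 *
 *   ... at init time, reserve one directly-addressable slot:
 *   if (!dr_raw_tls_calloc(&tls_seg, &tls_offs, 1, 0))
 *       DR_ASSERT(false);
 *
 *   ... in the bb event, move a value between the slot and a dead
 *   ... pointer-sized register without a clean call:
 *   dr_insert_write_raw_tls(drcontext, bb, inst, tls_seg, tls_offs, scratch_reg);
 *   dr_insert_read_raw_tls(drcontext, bb, inst, tls_seg, tls_offs, scratch_reg);
 *
 *   ... at exit:
 *   dr_raw_tls_cfree(tls_offs, 1);
 */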
DR_API
/* Current thread gives up its time quantum. */
void
dr_thread_yield(void)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = true;
else
dcontext->client_data->at_safe_to_terminate_syscall = true;
os_thread_yield();
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = false;
else
dcontext->client_data->at_safe_to_terminate_syscall = false;
}
DR_API
/* Current thread sleeps for time_ms milliseconds. */
void
dr_sleep(int time_ms)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = true;
else
dcontext->client_data->at_safe_to_terminate_syscall = true;
os_thread_sleep(time_ms);
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = false;
else
dcontext->client_data->at_safe_to_terminate_syscall = false;
}
#ifdef CLIENT_SIDELINE
DR_API
bool
dr_client_thread_set_suspendable(bool suspendable)
{
/* see notes in synch_with_all_threads() */
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (!IS_CLIENT_THREAD(dcontext))
return false;
dcontext->client_data->suspendable = suspendable;
return true;
}
#endif
DR_API
bool
dr_suspend_all_other_threads_ex(OUT void ***drcontexts,
OUT uint *num_suspended,
OUT uint *num_unsuspended,
dr_suspend_flags_t flags)
{
uint out_suspended = 0, out_unsuspended = 0;
thread_record_t **threads;
int num_threads;
dcontext_t *my_dcontext = get_thread_private_dcontext();
int i;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
CLIENT_ASSERT(OWN_NO_LOCKS(my_dcontext),
"dr_suspend_all_other_threads cannot be called while holding a lock");
CLIENT_ASSERT(drcontexts != NULL && num_suspended != NULL,
"dr_suspend_all_other_threads invalid params");
LOG(GLOBAL, LOG_FRAGMENT, 2,
"\ndr_suspend_all_other_threads: thread "TIDFMT" suspending all threads\n",
get_thread_id());
/* suspend all DR-controlled threads at safe locations */
if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER,
&threads, &num_threads, THREAD_SYNCH_NO_LOCKS_NO_XFER,
/* if we fail to suspend a thread (e.g., for
* privilege reasons), ignore and continue
*/
THREAD_SYNCH_SUSPEND_FAILURE_IGNORE)) {
LOG(GLOBAL, LOG_FRAGMENT, 2,
"\ndr_suspend_all_other_threads: failed to suspend every thread\n");
/* some threads may have been successfully suspended so we must return
* their info so they'll be resumed. I believe there is thus no
* scenario under which we return false.
*/
}
/* now we own the thread_initexit_lock */
CLIENT_ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock),
"internal locking error");
/* To avoid two passes we allocate the array now. It may be larger than
     * necessary if we had suspend failures but that's ok.
     * We hide the thread count and array in extra slots.
*/
*drcontexts = (void **)
global_heap_alloc((num_threads+2)*sizeof(dcontext_t*) HEAPACCT(ACCT_THREAD_MGT));
for (i = 0; i < num_threads; i++) {
dcontext_t *dcontext = threads[i]->dcontext;
if (dcontext != NULL) { /* include my_dcontext here */
if (dcontext != my_dcontext) {
/* must translate BEFORE freeing any memory! */
if (!thread_synch_successful(threads[i])) {
out_unsuspended++;
} else if (is_thread_currently_native(threads[i]) &&
!TEST(DR_SUSPEND_NATIVE, flags)) {
out_unsuspended++;
} else if (thread_synch_state_no_xfer(dcontext)) {
/* FIXME: for all other synchall callers, the app
* context should be sitting in their mcontext, even
* though we can't safely get their native context and
* translate it.
*/
(*drcontexts)[out_suspended] = (void *) dcontext;
out_suspended++;
CLIENT_ASSERT(!dcontext->client_data->mcontext_in_dcontext,
"internal inconsistency in where mcontext is");
/* officially get_mcontext() doesn't always set pc: we do anyway */
get_mcontext(dcontext)->pc = dcontext->next_tag;
dcontext->client_data->mcontext_in_dcontext = true;
} else {
(*drcontexts)[out_suspended] = (void *) dcontext;
out_suspended++;
/* It's not safe to clobber the thread's mcontext with
* its own translation b/c for shared_syscall we store
* the continuation pc in the esi slot.
* We could translate here into heap-allocated memory,
* but some clients may just want to stop
* the world but not examine the threads, so we lazily
* translate in dr_get_mcontext().
*/
CLIENT_ASSERT(!dcontext->client_data->suspended,
"inconsistent usage of dr_suspend_all_other_threads");
CLIENT_ASSERT(dcontext->client_data->cur_mc == NULL,
"inconsistent usage of dr_suspend_all_other_threads");
dcontext->client_data->suspended = true;
}
}
}
}
/* Hide the two extra vars we need the client to pass back to us */
(*drcontexts)[out_suspended] = (void *) threads;
(*drcontexts)[out_suspended+1] = (void *)(ptr_uint_t) num_threads;
*num_suspended = out_suspended;
if (num_unsuspended != NULL)
*num_unsuspended = out_unsuspended;
return true;
}
DR_API
bool
dr_suspend_all_other_threads(OUT void ***drcontexts,
OUT uint *num_suspended,
OUT uint *num_unsuspended)
{
return dr_suspend_all_other_threads_ex(drcontexts, num_suspended,
num_unsuspended, 0);
}
bool
dr_resume_all_other_threads(IN void **drcontexts,
IN uint num_suspended)
{
thread_record_t **threads;
int num_threads;
uint i;
    CLIENT_ASSERT(drcontexts != NULL,
                  "dr_resume_all_other_threads invalid params");
LOG(GLOBAL, LOG_FRAGMENT, 2,
"dr_resume_all_other_threads\n");
threads = (thread_record_t **) drcontexts[num_suspended];
num_threads = (int)(ptr_int_t) drcontexts[num_suspended+1];
for (i = 0; i < num_suspended; i++) {
dcontext_t *dcontext = (dcontext_t *) drcontexts[i];
if (dcontext->client_data->cur_mc != NULL) {
/* clear any cached mc from dr_get_mcontext_priv() */
heap_free(dcontext, dcontext->client_data->cur_mc,
sizeof(*dcontext->client_data->cur_mc) HEAPACCT(ACCT_CLIENT));
dcontext->client_data->cur_mc = NULL;
}
dcontext->client_data->suspended = false;
}
global_heap_free(drcontexts, (num_threads+2)*sizeof(dcontext_t*)
HEAPACCT(ACCT_THREAD_MGT));
end_synch_with_all_threads(threads, num_threads, true/*resume*/);
return true;
}
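/* A minimal usage sketch: stop the world, examine each suspended thread's
 * machine state (dr_get_mcontext() performs the lazy translation mentioned
 * above), then resume everyone:
 *
 *   void **contexts = NULL;
 *   uint num = 0, i;
 *   if (dr_suspend_all_other_threads(&contexts, &num, NULL)) {
 *       for (i = 0; i < num; i++) {
 *           dr_mcontext_t mc = { sizeof(mc), DR_MC_ALL, };
 *           if (dr_get_mcontext(contexts[i], &mc)) {
 *               dr_printf("thread %d at "PFX"\n",
 *                         (int)dr_get_thread_id(contexts[i]), mc.pc);
 *           }
 *       }
 *       dr_resume_all_other_threads(contexts, num);
 *   }
 */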
DR_API
bool
dr_is_thread_native(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "invalid param");
return is_thread_currently_native(dcontext->thread_record);
}
DR_API
bool
dr_retakeover_suspended_native_thread(void *drcontext)
{
bool res;
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "invalid param");
/* XXX: I don't quite see why I need to pop these 2 when I'm doing
* what a regular retakeover would do
*/
KSTOP_NOT_MATCHING_DC(dcontext, fcache_default);
KSTOP_NOT_MATCHING_DC(dcontext, dispatch_num_exits);
res = os_thread_take_over_suspended_native(dcontext);
return res;
}
# ifdef UNIX
DR_API
bool
dr_set_itimer(int which, uint millisec,
void (*func)(void *drcontext, dr_mcontext_t *mcontext))
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (func == NULL && millisec != 0)
return false;
return set_itimer_callback(dcontext, which, millisec, NULL,
(void (*)(dcontext_t *, dr_mcontext_t *))func);
}
DR_API
uint
dr_get_itimer(int which)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
return get_itimer_frequency(dcontext, which);
}
# endif /* UNIX */
DR_API
dr_where_am_i_t
dr_where_am_i(void *drcontext, app_pc pc, OUT void **tag_out)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "invalid param");
void *tag = NULL;
dr_where_am_i_t whereami = dcontext->whereami;
/* Further refine if pc is in the cache. */
if (whereami == DR_WHERE_FCACHE) {
fragment_t *fragment;
whereami = fcache_refine_whereami(dcontext, whereami, pc, &fragment);
if (fragment != NULL)
tag = fragment->tag;
}
if (tag_out != NULL)
*tag_out = tag;
return whereami;
}
#endif /* CLIENT_INTERFACE */
DR_API
/* Inserts inst as a non-application instruction into ilist prior to "where" */
void
instrlist_meta_preinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
instr_set_meta(inst);
instrlist_preinsert(ilist, where, inst);
}
DR_API
/* Inserts inst as a non-application instruction into ilist after "where" */
void
instrlist_meta_postinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
instr_set_meta(inst);
instrlist_postinsert(ilist, where, inst);
}
DR_API
/* Inserts inst as a non-application instruction onto the end of ilist */
void
instrlist_meta_append(instrlist_t *ilist, instr_t *inst)
{
instr_set_meta(inst);
instrlist_append(ilist, inst);
}
DR_API
void
instrlist_meta_fault_preinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
instr_set_meta_may_fault(inst, true);
instrlist_preinsert(ilist, where, inst);
}
DR_API
void
instrlist_meta_fault_postinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
instr_set_meta_may_fault(inst, true);
instrlist_postinsert(ilist, where, inst);
}
DR_API
void
instrlist_meta_fault_append(instrlist_t *ilist, instr_t *inst)
{
instr_set_meta_may_fault(inst, true);
instrlist_append(ilist, inst);
}
static void
convert_va_list_to_opnd(dcontext_t *dcontext, opnd_t **args, uint num_args, va_list ap)
{
uint i;
ASSERT(num_args > 0);
/* allocate at least one argument opnd */
/* we don't check for GLOBAL_DCONTEXT since DR internally calls this */
*args = HEAP_ARRAY_ALLOC(dcontext, opnd_t, num_args,
ACCT_CLEANCALL, UNPROTECTED);
for (i = 0; i < num_args; i++) {
(*args)[i] = va_arg(ap, opnd_t);
CLIENT_ASSERT(opnd_is_valid((*args)[i]),
"Call argument: bad operand. Did you create a valid opnd_t?");
}
}
static void
free_va_opnd_list(dcontext_t *dcontext, uint num_args, opnd_t *args)
{
if (num_args != 0) {
HEAP_ARRAY_FREE(dcontext, args, opnd_t, num_args,
ACCT_CLEANCALL, UNPROTECTED);
}
}
/* dr_insert_* are used by general DR */
/* Inserts a complete call to callee with the passed-in arguments */
void
dr_insert_call(void *drcontext, instrlist_t *ilist, instr_t *where,
void *callee, uint num_args, ...)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
opnd_t *args = NULL;
instr_t *label = INSTR_CREATE_label(drcontext);
dr_pred_type_t auto_pred = instrlist_get_auto_predicate(ilist);
va_list ap;
CLIENT_ASSERT(drcontext != NULL, "dr_insert_call: drcontext cannot be NULL");
instrlist_set_auto_predicate(ilist, DR_PRED_NONE);
#ifdef ARM
if (instr_predicate_is_cond(auto_pred)) {
        /* auto_predicate is set, but we handle the call with a cbr instead,
         * because the instrumentation we insert modifies cpsr.
*/
MINSERT(ilist, where, XINST_CREATE_jump_cond
(drcontext,
instr_invert_predicate(auto_pred),
opnd_create_instr(label)));
}
#endif
if (num_args != 0) {
va_start(ap, num_args);
convert_va_list_to_opnd(dcontext, &args, num_args, ap);
va_end(ap);
}
insert_meta_call_vargs(dcontext, ilist, where, META_CALL_RETURNS,
vmcode_get_start(), callee, num_args, args);
if (num_args != 0)
free_va_opnd_list(dcontext, num_args, args);
MINSERT(ilist, where, label);
instrlist_set_auto_predicate(ilist, auto_pred);
}
bool
dr_insert_call_ex(void *drcontext, instrlist_t *ilist, instr_t *where,
byte *encode_pc, void *callee, uint num_args, ...)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
opnd_t *args = NULL;
bool direct;
va_list ap;
CLIENT_ASSERT(drcontext != NULL, "dr_insert_call: drcontext cannot be NULL");
if (num_args != 0) {
va_start(ap, num_args);
convert_va_list_to_opnd(drcontext, &args, num_args, ap);
va_end(ap);
}
direct = insert_meta_call_vargs(dcontext, ilist, where, META_CALL_RETURNS, encode_pc,
callee, num_args, args);
if (num_args != 0)
free_va_opnd_list(dcontext, num_args, args);
return direct;
}
/* Not exported. Currently used for ARM to avoid storing to %lr. */
void
dr_insert_call_noreturn(void *drcontext, instrlist_t *ilist, instr_t *where,
void *callee, uint num_args, ...)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
opnd_t *args = NULL;
va_list ap;
CLIENT_ASSERT(drcontext != NULL, "dr_insert_call_noreturn: drcontext cannot be NULL");
CLIENT_ASSERT(instrlist_get_auto_predicate(ilist) == DR_PRED_NONE,
"Does not support auto-predication");
if (num_args != 0) {
va_start(ap, num_args);
convert_va_list_to_opnd(dcontext, &args, num_args, ap);
va_end(ap);
}
insert_meta_call_vargs(dcontext, ilist, where, 0, vmcode_get_start(), callee,
num_args, args);
if (num_args != 0)
free_va_opnd_list(dcontext, num_args, args);
}
/* Internal utility routine for inserting context save for a clean call.
* Returns the size of the data stored on the DR stack
* (in case the caller needs to align the stack pointer).
* XSP and XAX are modified by this call.
*/
static uint
prepare_for_call_ex(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *where, byte *encode_pc)
{
instr_t *in;
uint dstack_offs;
in = (where == NULL) ? instrlist_last(ilist) : instr_get_prev(where);
dstack_offs = prepare_for_clean_call(dcontext, cci, ilist, where, encode_pc);
/* now go through and mark inserted instrs as meta */
if (in == NULL)
in = instrlist_first(ilist);
else
in = instr_get_next(in);
while (in != where) {
instr_set_meta(in);
in = instr_get_next(in);
}
return dstack_offs;
}
/* Internal utility routine for inserting context restore for a clean call. */
static void
cleanup_after_call_ex(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *where, uint sizeof_param_area,
byte *encode_pc)
{
instr_t *in;
in = (where == NULL) ? instrlist_last(ilist) : instr_get_prev(where);
if (sizeof_param_area > 0) {
/* clean up the parameter area */
CLIENT_ASSERT(sizeof_param_area <= 127,
"cleanup_after_call_ex: sizeof_param_area must be <= 127");
/* mark it meta down below */
instrlist_preinsert(ilist, where,
XINST_CREATE_add(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_INT8(sizeof_param_area)));
}
cleanup_after_clean_call(dcontext, cci, ilist, where, encode_pc);
/* now go through and mark inserted instrs as meta */
if (in == NULL)
in = instrlist_first(ilist);
else
in = instr_get_next(in);
while (in != where) {
instr_set_meta(in);
in = instr_get_next(in);
}
}
/* Inserts a complete call to callee with the passed-in arguments, wrapped
* by an app save and restore.
*
* If "save_flags" includes DR_CLEANCALL_SAVE_FLOAT, saves the fp/mmx/sse state.
*
* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
* dr_prepare_for_call(). We guarantee to clients that all other slots
* (except the XAX mcontext slot) will remain untouched.
*
* NOTE : dr_insert_cbr_instrumentation has assumption about the clean call
* instrumentation layout, changes to the clean call instrumentation may break
* dr_insert_cbr_instrumentation.
*/
void
dr_insert_clean_call_ex_varg(void *drcontext, instrlist_t *ilist, instr_t *where,
void *callee, dr_cleancall_save_t save_flags,
uint num_args, opnd_t *args)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
uint dstack_offs, pad = 0;
size_t buf_sz = 0;
clean_call_info_t cci; /* information for clean call insertion. */
bool save_fpstate = TEST(DR_CLEANCALL_SAVE_FLOAT, save_flags);
meta_call_flags_t call_flags = META_CALL_CLEAN | META_CALL_RETURNS;
byte *encode_pc;
instr_t *label = INSTR_CREATE_label(drcontext);
dr_pred_type_t auto_pred = instrlist_get_auto_predicate(ilist);
CLIENT_ASSERT(drcontext != NULL, "dr_insert_clean_call: drcontext cannot be NULL");
STATS_INC(cleancall_inserted);
LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: insert clean call to "PFX"\n", callee);
instrlist_set_auto_predicate(ilist, DR_PRED_NONE);
#ifdef ARM
if (instr_predicate_is_cond(auto_pred)) {
        /* auto_predicate is set, but we handle the clean call with a cbr instead,
         * because the instrumentation we insert modifies cpsr.
*/
MINSERT(ilist, where, XINST_CREATE_jump_cond
(drcontext,
instr_invert_predicate(auto_pred),
opnd_create_instr(label)));
}
#endif
/* analyze the clean call, return true if clean call can be inlined. */
if (analyze_clean_call(dcontext, &cci, where, callee, save_fpstate,
TEST(DR_CLEANCALL_ALWAYS_OUT_OF_LINE, save_flags),
num_args, args) &&
!TEST(DR_CLEANCALL_ALWAYS_OUT_OF_LINE, save_flags)) {
#ifdef CLIENT_INTERFACE
/* we can perform the inline optimization and return. */
STATS_INC(cleancall_inlined);
LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: inlined callee "PFX"\n", callee);
insert_inline_clean_call(dcontext, &cci, ilist, where, args);
MINSERT(ilist, where, label);
instrlist_set_auto_predicate(ilist, auto_pred);
return;
#else /* CLIENT_INTERFACE */
ASSERT_NOT_REACHED();
#endif /* CLIENT_INTERFACE */
}
/* honor requests from caller */
if (TEST(DR_CLEANCALL_NOSAVE_FLAGS, save_flags)) {
/* even if we remove flag saves we want to keep mcontext shape */
cci.preserve_mcontext = true;
cci.skip_save_flags = true;
/* we assume this implies DF should be 0 already */
cci.skip_clear_flags = true;
/* XXX: should also provide DR_CLEANCALL_NOSAVE_NONAFLAGS to
* preserve just arith flags on return from a call
*/
}
if (TESTANY(DR_CLEANCALL_NOSAVE_XMM |
DR_CLEANCALL_NOSAVE_XMM_NONPARAM |
DR_CLEANCALL_NOSAVE_XMM_NONRET, save_flags)) {
uint i;
/* even if we remove xmm saves we want to keep mcontext shape */
cci.preserve_mcontext = true;
/* start w/ all */
#if defined(X64) && defined(WINDOWS)
cci.num_simd_skip = 6;
#else
/* all 8 (or 16) are scratch */
cci.num_simd_skip = NUM_SIMD_REGS;
#endif
for (i=0; i<cci.num_simd_skip; i++)
cci.simd_skip[i] = true;
/* now remove those used for param/retval */
#ifdef X64
if (TEST(DR_CLEANCALL_NOSAVE_XMM_NONPARAM, save_flags)) {
/* xmm0-3 (-7 for linux) are used for params */
for (i=0; i<IF_UNIX_ELSE(7,3); i++)
cci.simd_skip[i] = false;
cci.num_simd_skip -= i;
}
if (TEST(DR_CLEANCALL_NOSAVE_XMM_NONRET, save_flags)) {
/* xmm0 (and xmm1 for linux) are used for retvals */
cci.simd_skip[0] = false;
cci.num_simd_skip--;
# ifdef UNIX
cci.simd_skip[1] = false;
cci.num_simd_skip--;
# endif
}
#endif
}
if (TEST(DR_CLEANCALL_INDIRECT, save_flags))
encode_pc = vmcode_unreachable_pc();
else
encode_pc = vmcode_get_start();
dstack_offs = prepare_for_call_ex(dcontext, &cci, ilist, where, encode_pc);
#ifdef X64
/* PR 218790: we assume that dr_prepare_for_call() leaves stack 16-byte
* aligned, which is what insert_meta_call_vargs requires. */
if (cci.should_align) {
CLIENT_ASSERT(ALIGNED(dstack_offs, 16),
"internal error: bad stack alignment");
}
#endif
if (save_fpstate) {
/* save on the stack: xref PR 202669 on clients using more stack */
buf_sz = proc_fpstate_save_size();
/* we need 16-byte-alignment */
pad = ALIGN_FORWARD_UINT(dstack_offs, 16) - dstack_offs;
IF_X64(CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_int(buf_sz + pad),
"dr_insert_clean_call: internal truncation error"));
MINSERT(ilist, where, XINST_CREATE_sub(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_INT32((int)(buf_sz + pad))));
dr_insert_save_fpstate(drcontext, ilist, where,
opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0,
OPSZ_512));
}
/* PR 302951: restore state if clean call args reference app memory.
* We use a hack here: this is the only instance where we mark as our-mangling
* but do not have a translation target set, which indicates to the restore
* routines that this is a clean call. If the client adds instrs in the middle
* translation will fail; if the client modifies any instr, the our-mangling
* flag will disappear and translation will fail.
*/
instrlist_set_our_mangling(ilist, true);
if (TEST(DR_CLEANCALL_RETURNS_TO_NATIVE, save_flags))
call_flags |= META_CALL_RETURNS_TO_NATIVE;
insert_meta_call_vargs(dcontext, ilist, where, call_flags,
encode_pc, callee, num_args, args);
instrlist_set_our_mangling(ilist, false);
if (save_fpstate) {
dr_insert_restore_fpstate(drcontext, ilist, where,
opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0,
OPSZ_512));
MINSERT(ilist, where, XINST_CREATE_add(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_INT32(buf_sz + pad)));
}
cleanup_after_call_ex(dcontext, &cci, ilist, where, 0, encode_pc);
MINSERT(ilist, where, label);
instrlist_set_auto_predicate(ilist, auto_pred);
}
void
dr_insert_clean_call_ex(void *drcontext, instrlist_t *ilist, instr_t *where,
void *callee, dr_cleancall_save_t save_flags, uint num_args, ...)
{
opnd_t *args = NULL;
if (num_args != 0) {
va_list ap;
va_start(ap, num_args);
convert_va_list_to_opnd(drcontext, &args, num_args, ap);
va_end(ap);
}
dr_insert_clean_call_ex_varg(drcontext, ilist, where, callee, save_flags,
num_args, args);
if (num_args != 0)
free_va_opnd_list(drcontext, num_args, args);
}
DR_API
void
dr_insert_clean_call(void *drcontext, instrlist_t *ilist, instr_t *where,
void *callee, bool save_fpstate, uint num_args, ...)
{
dr_cleancall_save_t flags = (save_fpstate ? DR_CLEANCALL_SAVE_FLOAT : 0);
opnd_t *args = NULL;
if (num_args != 0) {
va_list ap;
va_start(ap, num_args);
convert_va_list_to_opnd(drcontext, &args, num_args, ap);
va_end(ap);
}
dr_insert_clean_call_ex_varg(drcontext, ilist, where, callee, flags, num_args, args);
if (num_args != 0)
free_va_opnd_list(drcontext, num_args, args);
}
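/* A minimal usage sketch: from the bb event, insert a clean call at the top of
 * each block; each argument is an opnd_t, here an immediate holding the
 * block's start pc (callee name is illustrative):
 *
 *   static void count_hit(app_pc start) { dr_printf("hit "PFX"\n", start); }
 *
 *   dr_insert_clean_call(drcontext, bb, instrlist_first(bb), (void *)count_hit,
 *                        false, 1,
 *                        OPND_CREATE_INTPTR(dr_fragment_app_pc(tag)));
 */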
/* Utility routine for inserting a clean call to an instrumentation routine
* Returns the size of the data stored on the DR stack (in case the caller
* needs to align the stack pointer). XSP and XAX are modified by this call.
*
* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
* prepare_for_clean_call(). We guarantee to clients that all other slots
* (except the XAX mcontext slot) will remain untouched.
*/
DR_API uint
dr_prepare_for_call(void *drcontext, instrlist_t *ilist, instr_t *where)
{
CLIENT_ASSERT(drcontext != NULL, "dr_prepare_for_call: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_prepare_for_call: drcontext is invalid");
return prepare_for_call_ex((dcontext_t *)drcontext, NULL, ilist, where,
vmcode_get_start());
}
DR_API void
dr_cleanup_after_call(void *drcontext, instrlist_t *ilist, instr_t *where,
uint sizeof_param_area)
{
CLIENT_ASSERT(drcontext != NULL, "dr_cleanup_after_call: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_cleanup_after_call: drcontext is invalid");
cleanup_after_call_ex((dcontext_t *)drcontext, NULL, ilist, where,
sizeof_param_area, vmcode_get_start());
}
#ifdef CLIENT_INTERFACE
DR_API void
dr_swap_to_clean_stack(void *drcontext, instrlist_t *ilist, instr_t *where)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_swap_to_clean_stack: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_swap_to_clean_stack: drcontext is invalid");
/* PR 219620: For thread-shared, we need to get the dcontext
* dynamically rather than use the constant passed in here.
*/
if (SCRATCH_ALWAYS_TLS()) {
MINSERT(ilist, where, instr_create_save_to_tls
(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
insert_get_mcontext_base(dcontext, ilist, where, SCRATCH_REG0);
/* save app xsp, and then bring in dstack to xsp */
MINSERT(ilist, where, instr_create_save_to_dc_via_reg
(dcontext, SCRATCH_REG0, REG_XSP, XSP_OFFSET));
/* DSTACK_OFFSET isn't within the upcontext so if it's separate this won't
* work right. FIXME - the dcontext accessing routines are a mess of shared
* vs. no shared support, separate context vs. no separate context support etc. */
ASSERT_NOT_IMPLEMENTED(!TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask));
MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, SCRATCH_REG0, REG_XSP, DSTACK_OFFSET));
MINSERT(ilist, where, instr_create_restore_from_tls
(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
}
else {
MINSERT(ilist, where, instr_create_save_to_dcontext
(dcontext, REG_XSP, XSP_OFFSET));
MINSERT(ilist, where, instr_create_restore_dynamo_stack(dcontext));
}
}
DR_API void
dr_restore_app_stack(void *drcontext, instrlist_t *ilist, instr_t *where)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_restore_app_stack: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_restore_app_stack: drcontext is invalid");
/* restore stack */
if (SCRATCH_ALWAYS_TLS()) {
/* use the register we're about to clobber as scratch space */
insert_get_mcontext_base(dcontext, ilist, where, REG_XSP);
MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, REG_XSP, REG_XSP, XSP_OFFSET));
} else {
MINSERT(ilist, where, instr_create_restore_from_dcontext
(dcontext, REG_XSP, XSP_OFFSET));
}
}
#define SPILL_SLOT_TLS_MAX 2
#define NUM_TLS_SPILL_SLOTS (SPILL_SLOT_TLS_MAX + 1)
#define NUM_SPILL_SLOTS (SPILL_SLOT_MAX + 1)
/* The three tls slots we make available to clients. We reserve TLS_REG0_SLOT for our
* own use in dr convenience routines. Note the +1 is because the max is an array index
* (so zero based) while array size is number of slots. We don't need to +1 in
* SPILL_SLOT_MC_REG because subtracting SPILL_SLOT_TLS_MAX already accounts for it. */
static const ushort SPILL_SLOT_TLS_OFFS[NUM_TLS_SPILL_SLOTS] =
{ TLS_REG3_SLOT, TLS_REG2_SLOT, TLS_REG1_SLOT };
static const reg_id_t SPILL_SLOT_MC_REG[NUM_SPILL_SLOTS - NUM_TLS_SPILL_SLOTS] = {
#ifdef X86
/* The dcontext reg slots we make available to clients. We reserve XAX and XSP for
* our own use in dr convenience routines. */
# ifdef X64
REG_R15, REG_R14, REG_R13, REG_R12, REG_R11, REG_R10, REG_R9, REG_R8,
# endif
REG_XDI, REG_XSI, REG_XBP, REG_XDX, REG_XCX, REG_XBX
#elif defined(AARCHXX)
/* DR_REG_R0 is not used here. See prepare_for_clean_call. */
DR_REG_R6, DR_REG_R5, DR_REG_R4, DR_REG_R3, DR_REG_R2, DR_REG_R1
#endif /* X86/ARM */
};
DR_API void
dr_save_reg(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg,
dr_spill_slot_t slot)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_save_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_save_reg: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_save_reg: invalid spill slot selection");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"dr_save_reg requires pointer-sized gpr");
if (slot <= SPILL_SLOT_TLS_MAX) {
ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]);
MINSERT(ilist, where,
XINST_CREATE_store(dcontext, opnd_create_tls_slot(offs),
opnd_create_reg(reg)));
} else {
reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
int offs = opnd_get_reg_dcontext_offs(reg_slot);
if (SCRATCH_ALWAYS_TLS()) {
/* PR 219620: For thread-shared, we need to get the dcontext
* dynamically rather than use the constant passed in here.
*/
reg_id_t tmp = (reg == SCRATCH_REG0) ? SCRATCH_REG1 : SCRATCH_REG0;
MINSERT(ilist, where, instr_create_save_to_tls
(dcontext, tmp, TLS_REG0_SLOT));
insert_get_mcontext_base(dcontext, ilist, where, tmp);
MINSERT(ilist, where, instr_create_save_to_dc_via_reg
(dcontext, tmp, reg, offs));
MINSERT(ilist, where, instr_create_restore_from_tls
(dcontext, tmp, TLS_REG0_SLOT));
} else {
MINSERT(ilist, where, instr_create_save_to_dcontext(dcontext, reg, offs));
}
}
}
/* if you want to save an 8- or 16-bit reg, you must pass in the containing ptr-sized reg! */
DR_API void
dr_restore_reg(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg,
dr_spill_slot_t slot)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_restore_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_restore_reg: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_restore_reg: invalid spill slot selection");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"dr_restore_reg requires a pointer-sized gpr");
if (slot <= SPILL_SLOT_TLS_MAX) {
ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]);
MINSERT(ilist, where,
XINST_CREATE_load(dcontext, opnd_create_reg(reg),
opnd_create_tls_slot(offs)));
} else {
reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
int offs = opnd_get_reg_dcontext_offs(reg_slot);
if (SCRATCH_ALWAYS_TLS()) {
/* PR 219620: For thread-shared, we need to get the dcontext
* dynamically rather than use the constant passed in here.
*/
/* use the register we're about to clobber as scratch space */
insert_get_mcontext_base(dcontext, ilist, where, reg);
MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, reg, reg, offs));
} else {
MINSERT(ilist, where,
instr_create_restore_from_dcontext(dcontext, reg, offs));
}
}
}
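/* A minimal usage sketch: spill xcx to the first client spill slot around
 * inlined instrumentation that clobbers it (register and slot choice are
 * illustrative):
 *
 *   dr_save_reg(drcontext, bb, inst, DR_REG_XCX, SPILL_SLOT_1);
 *   ... meta instructions that use xcx ...
 *   dr_restore_reg(drcontext, bb, inst, DR_REG_XCX, SPILL_SLOT_1);
 */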
DR_API dr_spill_slot_t
dr_max_opnd_accessible_spill_slot()
{
if (SCRATCH_ALWAYS_TLS())
return SPILL_SLOT_TLS_MAX;
else
return SPILL_SLOT_MAX;
}
/* creates an opnd to access spill slot slot, slot must be <=
* dr_max_opnd_accessible_spill_slot() */
opnd_t
reg_spill_slot_opnd(dcontext_t *dcontext, dr_spill_slot_t slot)
{
if (slot <= SPILL_SLOT_TLS_MAX) {
ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]);
return opnd_create_tls_slot(offs);
} else {
reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
int offs = opnd_get_reg_dcontext_offs(reg_slot);
ASSERT(!SCRATCH_ALWAYS_TLS()); /* client assert above should catch */
return opnd_create_dcontext_field(dcontext, offs);
}
}
DR_API
opnd_t
dr_reg_spill_slot_opnd(void *drcontext, dr_spill_slot_t slot)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_reg_spill_slot_opnd: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_reg_spill_slot_opnd: drcontext is invalid");
CLIENT_ASSERT(slot <= dr_max_opnd_accessible_spill_slot(),
"dr_reg_spill_slot_opnd: slot must be less than "
"dr_max_opnd_accessible_spill_slot()");
return reg_spill_slot_opnd(dcontext, slot);
}
DR_API
/* used to read a saved register spill slot from a clean call or a restore_state_event */
reg_t
dr_read_saved_reg(void *drcontext, dr_spill_slot_t slot)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
CLIENT_ASSERT(drcontext != NULL, "dr_read_saved_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_read_saved_reg: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_read_saved_reg: invalid spill slot selection");
    /* FIXME - should we allow clients to read other threads' saved registers? It's not
     * as dangerous as writing, but I can't think of a usage scenario where you'd want to.
     * Seems more likely to be a bug. */
CLIENT_ASSERT(dcontext == get_thread_private_dcontext(),
"dr_read_saved_reg(): drcontext does not belong to current thread");
if (slot <= SPILL_SLOT_TLS_MAX) {
ushort offs = SPILL_SLOT_TLS_OFFS[slot];
return *(reg_t *)(((byte *)&dcontext->local_state->spill_space) + offs);
} else {
reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
return reg_get_value_priv(reg_slot, get_mcontext(dcontext));
}
}
DR_API
/* used to write a saved register spill slot from a clean call */
void
dr_write_saved_reg(void *drcontext, dr_spill_slot_t slot, reg_t value)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
CLIENT_ASSERT(drcontext != NULL, "dr_write_saved_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_write_saved_reg: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_write_saved_reg: invalid spill slot selection");
    /* FIXME - should we allow clients to write to other threads' saved registers?
     * I can't think of a usage scenario where that would be correct; it seems much more
     * likely to be a difficult-to-diagnose bug that crashes the app or dr. */
CLIENT_ASSERT(dcontext == get_thread_private_dcontext(),
"dr_write_saved_reg(): drcontext does not belong to current thread");
if (slot <= SPILL_SLOT_TLS_MAX) {
ushort offs = SPILL_SLOT_TLS_OFFS[slot];
*(reg_t *)(((byte *)&dcontext->local_state->spill_space) + offs) = value;
} else {
reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
reg_set_value_priv(reg_slot, get_mcontext(dcontext), value);
}
}
DR_API
/**
* Inserts into ilist prior to "where" instruction(s) to read into the
* general-purpose full-size register reg from the user-controlled drcontext
* field for this thread.
*/
void
dr_insert_read_tls_field(void *drcontext, instrlist_t *ilist, instr_t *where,
reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_read_tls_field: drcontext cannot be NULL");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"must use a pointer-sized general-purpose register");
if (SCRATCH_ALWAYS_TLS()) {
/* For thread-shared, since reg must be general-purpose we can
* use it as a base pointer (repeatedly). Plus it's already dead.
*/
MINSERT(ilist, where, instr_create_restore_from_tls
(dcontext, reg, TLS_DCONTEXT_SLOT));
MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, reg, reg, CLIENT_DATA_OFFSET));
MINSERT(ilist, where, XINST_CREATE_load
(dcontext, opnd_create_reg(reg),
OPND_CREATE_MEMPTR(reg, offsetof(client_data_t, user_field))));
} else {
MINSERT(ilist, where, XINST_CREATE_load
(dcontext, opnd_create_reg(reg),
OPND_CREATE_ABSMEM(&dcontext->client_data->user_field, OPSZ_PTR)));
}
}
DR_API
/**
* Inserts into ilist prior to "where" instruction(s) to write the
* general-purpose full-size register reg to the user-controlled drcontext field
* for this thread.
*/
void
dr_insert_write_tls_field(void *drcontext, instrlist_t *ilist, instr_t *where,
reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_write_tls_field: drcontext cannot be NULL");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"must use a pointer-sized general-purpose register");
if (SCRATCH_ALWAYS_TLS()) {
reg_id_t spill = SCRATCH_REG0;
if (reg == spill) /* don't need sub-reg test b/c we know it's pointer-sized */
spill = SCRATCH_REG1;
MINSERT(ilist, where, instr_create_save_to_tls(dcontext, spill, TLS_REG0_SLOT));
MINSERT(ilist, where, instr_create_restore_from_tls
(dcontext, spill, TLS_DCONTEXT_SLOT));
MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, spill, spill, CLIENT_DATA_OFFSET));
MINSERT(ilist, where, XINST_CREATE_store
(dcontext, OPND_CREATE_MEMPTR(spill,
offsetof(client_data_t, user_field)),
opnd_create_reg(reg)));
MINSERT(ilist, where,
instr_create_restore_from_tls(dcontext, spill, TLS_REG0_SLOT));
} else {
MINSERT(ilist, where, XINST_CREATE_store
(dcontext, OPND_CREATE_ABSMEM
(&dcontext->client_data->user_field, OPSZ_PTR),
opnd_create_reg(reg)));
}
}
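/* A minimal usage sketch tying these to dr_set_tls_field(): stash a per-thread
 * pointer at thread init, then load it inline into a dead register (register
 * choice is illustrative) before "inst":
 *
 *   ... in the thread init event:
 *   dr_set_tls_field(drcontext, (void *)per_thread_data);
 *
 *   ... in the bb event:
 *   dr_insert_read_tls_field(drcontext, bb, inst, DR_REG_XDX);
 */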
DR_API void
dr_save_arith_flags(void *drcontext, instrlist_t *ilist, instr_t *where,
dr_spill_slot_t slot)
{
reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
CLIENT_ASSERT(drcontext != NULL,
"dr_save_arith_flags: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_save_arith_flags: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_save_arith_flags: invalid spill slot selection");
dr_save_reg(drcontext, ilist, where, reg, slot);
dr_save_arith_flags_to_reg(drcontext, ilist, where, reg);
}
DR_API void
dr_restore_arith_flags(void *drcontext, instrlist_t *ilist, instr_t *where,
dr_spill_slot_t slot)
{
reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
CLIENT_ASSERT(drcontext != NULL,
"dr_restore_arith_flags: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_restore_arith_flags: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_restore_arith_flags: invalid spill slot selection");
dr_restore_arith_flags_from_reg(drcontext, ilist, where, reg);
dr_restore_reg(drcontext, ilist, where, reg, slot);
}
DR_API void
dr_save_arith_flags_to_xax(void *drcontext, instrlist_t *ilist, instr_t *where)
{
reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
dr_save_arith_flags_to_reg(drcontext, ilist, where, reg);
}
DR_API void
dr_restore_arith_flags_from_xax(void *drcontext, instrlist_t *ilist,
instr_t *where)
{
reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
dr_restore_arith_flags_from_reg(drcontext, ilist, where, reg);
}
DR_API void
dr_save_arith_flags_to_reg(void *drcontext, instrlist_t *ilist,
instr_t *where, reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_save_arith_flags_to_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_save_arith_flags_to_reg: drcontext is invalid");
#ifdef X86
CLIENT_ASSERT(reg == DR_REG_XAX,
"only xax should be used for save arith flags in X86");
/* flag saving code:
* lahf
* seto al
*/
MINSERT(ilist, where, INSTR_CREATE_lahf(dcontext));
MINSERT(ilist, where,
INSTR_CREATE_setcc(dcontext, OP_seto, opnd_create_reg(REG_AL)));
#elif defined(ARM)
/* flag saving code: mrs reg, cpsr */
MINSERT(ilist, where,
INSTR_CREATE_mrs(dcontext,
opnd_create_reg(reg),
opnd_create_reg(DR_REG_CPSR)));
#elif defined(AARCH64)
/* flag saving code: mrs reg, nzcv */
MINSERT(ilist, where,
INSTR_CREATE_mrs(dcontext,
opnd_create_reg(reg),
opnd_create_reg(DR_REG_NZCV)));
#endif /* X86/ARM/AARCH64 */
}
DR_API void
dr_restore_arith_flags_from_reg(void *drcontext, instrlist_t *ilist,
instr_t *where, reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_restore_arith_flags_from_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_restore_arith_flags_from_reg: drcontext is invalid");
#ifdef X86
CLIENT_ASSERT(reg == DR_REG_XAX,
"only xax should be used for save arith flags in X86");
/* flag restoring code:
* add 0x7f,%al
* sahf
*/
/* do an add such that OF will be set only if seto set
* the MSB of saveto to 1
*/
MINSERT(ilist, where,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_AL), OPND_CREATE_INT8(0x7f)));
MINSERT(ilist, where, INSTR_CREATE_sahf(dcontext));
#elif defined(ARM)
    /* flag restoring code: msr apsr_nzcvqg, reg */
MINSERT(ilist, where,
INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_CPSR),
OPND_CREATE_INT_MSR_NZCVQG(),
opnd_create_reg(reg)));
#elif defined(AARCH64)
    /* flag restoring code: msr nzcv, reg */
MINSERT(ilist, where,
INSTR_CREATE_msr(dcontext,
opnd_create_reg(DR_REG_NZCV),
opnd_create_reg(reg)));
#endif /* X86/ARM/AARCH64 */
}
/* providing functionality of old -instr_calls and -instr_branches flags
*
* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
* dr_insert_clean_call(). We guarantee to clients that all other slots
* (except the XAX mcontext slot) will remain untouched.
*/
DR_API void
dr_insert_call_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee)
{
ptr_uint_t target, address;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_call_instrumentation: drcontext cannot be NULL");
address = (ptr_uint_t) instr_get_translation(instr);
/* dr_insert_ubr_instrumentation() uses this function */
CLIENT_ASSERT(instr_is_call(instr) || instr_is_ubr(instr),
"dr_insert_{ubr,call}_instrumentation must be applied to a ubr");
CLIENT_ASSERT(address != 0,
"dr_insert_{ubr,call}_instrumentation: can't determine app address");
if (opnd_is_pc(instr_get_target(instr))) {
if (opnd_is_far_pc(instr_get_target(instr))) {
/* FIXME: handle far pc */
CLIENT_ASSERT(false,
"dr_insert_{ubr,call}_instrumentation: far pc not supported");
}
/* In release build for far pc keep going assuming 0 base */
target = (ptr_uint_t) opnd_get_pc(instr_get_target(instr));
}
else if (opnd_is_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
target = (ptr_uint_t) instr_get_translation(tgt);
CLIENT_ASSERT(target != 0,
"dr_insert_{ubr,call}_instrumentation: unknown target");
if (opnd_is_far_instr(instr_get_target(instr))) {
/* FIXME: handle far instr */
CLIENT_ASSERT(false, "dr_insert_{ubr,call}_instrumentation: far instr "
"not supported");
}
} else {
CLIENT_ASSERT(false, "dr_insert_{ubr,call}_instrumentation: unknown target");
target = 0;
}
dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 2,
/* address of call is 1st parameter */
OPND_CREATE_INTPTR(address),
/* call target is 2nd parameter */
OPND_CREATE_INTPTR(target));
}
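/* A minimal usage sketch (hypothetical client code, not part of this file):
 * the clean-call callee receives the two intptr arguments set up above.
 *
 *   static void
 *   at_call(app_pc instr_addr, app_pc target_addr)
 *   {
 *       dr_fprintf(STDERR, "call @"PFX" -> "PFX"\n", instr_addr, target_addr);
 *   }
 *
 *   static dr_emit_flags_t
 *   event_bb(void *drcontext, void *tag, instrlist_t *bb, bool for_trace,
 *            bool translating)
 *   {
 *       instr_t *instr;
 *       for (instr = instrlist_first_app(bb); instr != NULL;
 *            instr = instr_get_next_app(instr)) {
 *           if (instr_is_call_direct(instr)) {
 *               dr_insert_call_instrumentation(drcontext, bb, instr,
 *                                              (void *)at_call);
 *           }
 *       }
 *       return DR_EMIT_DEFAULT;
 *   }
 */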
/* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
* dr_insert_clean_call(). We guarantee to clients that all other slots
* (except the XAX mcontext slot) will remain untouched. Since we need another
* tls spill slot in this routine we require the caller to give us one. */
DR_API void
dr_insert_mbr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee, dr_spill_slot_t scratch_slot)
{
#ifdef X86
dcontext_t *dcontext = (dcontext_t *) drcontext;
ptr_uint_t address = (ptr_uint_t) instr_get_translation(instr);
opnd_t tls_opnd;
instr_t *newinst;
reg_id_t reg_target;
/* PR 214051: dr_insert_mbr_instrumentation() broken with -indcall2direct */
CLIENT_ASSERT(!DYNAMO_OPTION(indcall2direct),
"dr_insert_mbr_instrumentation not supported with -opt_speed");
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_mbr_instrumentation: drcontext cannot be NULL");
CLIENT_ASSERT(address != 0,
"dr_insert_mbr_instrumentation: can't determine app address");
CLIENT_ASSERT(instr_is_mbr(instr),
"dr_insert_mbr_instrumentation must be applied to an mbr");
/* We need a TLS spill slot to use. We can use any tls slot that is opnd
* accessible. */
CLIENT_ASSERT(scratch_slot <= dr_max_opnd_accessible_spill_slot(),
"dr_insert_mbr_instrumentation: scratch_slot must be less than "
"dr_max_opnd_accessible_spill_slot()");
    /* It is possible for the mbr instruction to use the XCX register, so we
     * have to use an unused register.
     */
for (reg_target = REG_XAX; reg_target <= REG_XBX; reg_target++) {
if (!instr_uses_reg(instr, reg_target))
break;
}
/* PR 240265: we disallow clients to add post-mbr instrumentation, so we
* avoid doing that here even though it's a little less efficient since
* our mbr mangling will re-grab the target.
* We could keep it post-mbr and mark it w/ a special flag so we allow
* our own but not clients' instrumentation post-mbr: but then we
* hit post-syscall issues for wow64 where post-mbr equals post-syscall
* (PR 240258: though we might solve that some other way).
*/
/* Note that since we're using a client exposed slot we know it will be
* preserved across the clean call. */
tls_opnd = dr_reg_spill_slot_opnd(drcontext, scratch_slot);
newinst = XINST_CREATE_store(dcontext, tls_opnd, opnd_create_reg(reg_target));
/* PR 214962: ensure we'll properly translate the de-ref of app
* memory by marking the spill and de-ref as INSTR_OUR_MANGLING.
*/
instr_set_our_mangling(newinst, true);
MINSERT(ilist, instr, newinst);
if (instr_is_return(instr)) {
/* the retaddr operand is always the final source for all OP_ret* instrs */
opnd_t retaddr = instr_get_src(instr, instr_num_srcs(instr) - 1);
opnd_size_t sz = opnd_get_size(retaddr);
/* Even for far ret and iret, retaddr is at TOS
* but operand size needs to be set to stack size
* since iret pops more than return address.
*/
opnd_set_size(&retaddr, OPSZ_STACK);
newinst = instr_create_1dst_1src(dcontext, sz == OPSZ_2 ? OP_movzx : OP_mov_ld,
opnd_create_reg(reg_target), retaddr);
} else {
/* call* or jmp* */
opnd_t src = instr_get_src(instr, 0);
opnd_size_t sz = opnd_get_size(src);
/* if a far cti, we can't fit it into a register: asserted above.
* in release build we'll get just the address here.
*/
if (instr_is_far_cti(instr)) {
if (sz == OPSZ_10) {
sz = OPSZ_8;
} else if (sz == OPSZ_6) {
sz = OPSZ_4;
# ifdef X64
reg_target = reg_64_to_32(reg_target);
# endif
} else /* target has OPSZ_4 */ {
sz = OPSZ_2;
}
opnd_set_size(&src, sz);
}
# ifdef UNIX
        /* xref i#1834: the problem with the fs and gs segments is a general
         * problem on linux; this fix is specific to mbr instrumentation, but a
         * general solution is needed.
*/
if (INTERNAL_OPTION(mangle_app_seg) && opnd_is_far_base_disp(src)) {
src = mangle_seg_ref_opnd(dcontext, ilist, instr, src, reg_target);
}
# endif
newinst = instr_create_1dst_1src(dcontext,
sz == OPSZ_2 ? OP_movzx : OP_mov_ld,
opnd_create_reg(reg_target), src);
}
instr_set_our_mangling(newinst, true);
MINSERT(ilist, instr, newinst);
/* Now we want the true app state saved, for dr_get_mcontext().
* We specially recognize our OP_xchg as a restore in
* instr_is_reg_spill_or_restore().
*/
MINSERT(ilist, instr,
INSTR_CREATE_xchg(dcontext, tls_opnd, opnd_create_reg(reg_target)));
dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 2,
/* address of mbr is 1st param */
OPND_CREATE_INTPTR(address),
/* indirect target (in tls, xchg-d from ecx) is 2nd param */
tls_opnd);
#elif defined (ARM)
/* i#1551: NYI on ARM.
* Also, we may want to split these out into arch/{x86,arm}/ files
*/
ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
}
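/* A minimal usage sketch (hypothetical client code, not part of this file):
 * the callee receives the mbr's address and its dynamic target, which was
 * spilled to the chosen slot above; the slot must be no larger than
 * dr_max_opnd_accessible_spill_slot().
 *
 *   static void
 *   at_mbr(app_pc instr_addr, app_pc target_addr)
 *   {
 *       // e.g., record an indirect-branch edge instr_addr -> target_addr
 *   }
 *
 *   // inside a bb event, for an instr where instr_is_mbr(instr) is true:
 *   //   dr_insert_mbr_instrumentation(drcontext, bb, instr, (void *)at_mbr,
 *   //                                 SPILL_SLOT_1);
 */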
/* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
* dr_insert_clean_call(). We guarantee to clients that all other slots
* (except the XAX mcontext slot) will remain untouched.
*
* NOTE : this routine has assumption about the layout of the clean call,
* so any change to clean call instrumentation layout may break this routine.
*/
static void
dr_insert_cbr_instrumentation_help(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee, bool has_fallthrough, opnd_t user_data)
{
#ifdef X86
dcontext_t *dcontext = (dcontext_t *) drcontext;
ptr_uint_t address, target;
int opc;
instr_t *app_flags_ok;
    bool out_of_line_switch = false;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_cbr_instrumentation: drcontext cannot be NULL");
address = (ptr_uint_t) instr_get_translation(instr);
CLIENT_ASSERT(address != 0,
"dr_insert_cbr_instrumentation: can't determine app address");
CLIENT_ASSERT(instr_is_cbr(instr),
"dr_insert_cbr_instrumentation must be applied to a cbr");
CLIENT_ASSERT(opnd_is_near_pc(instr_get_target(instr)) ||
opnd_is_near_instr(instr_get_target(instr)),
"dr_insert_cbr_instrumentation: target opnd must be a near pc or "
"near instr");
if (opnd_is_near_pc(instr_get_target(instr)))
target = (ptr_uint_t) opnd_get_pc(instr_get_target(instr));
else if (opnd_is_near_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
target = (ptr_uint_t) instr_get_translation(tgt);
CLIENT_ASSERT(target != 0, "dr_insert_cbr_instrumentation: unknown target");
} else {
CLIENT_ASSERT(false, "dr_insert_cbr_instrumentation: unknown target");
target = 0;
}
app_flags_ok = instr_get_prev(instr);
if (has_fallthrough) {
ptr_uint_t fallthrough = address + instr_length(drcontext, instr);
CLIENT_ASSERT(!opnd_uses_reg(user_data, DR_REG_XBX),
"register ebx should not be used");
CLIENT_ASSERT(fallthrough > address, "wrong fallthrough address");
dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 5,
                             /* push address of cbr onto stack as 1st parameter */
OPND_CREATE_INTPTR(address),
/* target is 2nd parameter */
OPND_CREATE_INTPTR(target),
                             /* fall-through is 3rd parameter */
OPND_CREATE_INTPTR(fallthrough),
/* branch direction (put in ebx below) is 4th parameter */
opnd_create_reg(REG_XBX),
/* user defined data is 5th parameter */
opnd_is_null(user_data) ? OPND_CREATE_INT32(0) : user_data);
} else {
dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 3,
                             /* push address of cbr onto stack as 1st parameter */
OPND_CREATE_INTPTR(address),
/* target is 2nd parameter */
OPND_CREATE_INTPTR(target),
/* branch direction (put in ebx below) is 3rd parameter */
opnd_create_reg(REG_XBX));
}
/* calculate whether branch taken or not
* since the clean call mechanism clobbers eflags, we
* must insert our checks prior to that clobbering.
* since we do it AFTER the pusha, we don't have to save; but, we
* can't use a param that's part of any calling convention b/c w/
* PR 250976 our clean call will get it from the pusha.
* ebx is a good choice.
*/
/* We expect:
mov 0x400e5e34 -> %esp
pusha %esp %eax %ebx %ecx %edx %ebp %esi %edi -> %esp (%esp)
pushf %esp -> %esp (%esp)
push $0x00000000 %esp -> %esp (%esp)
popf %esp (%esp) -> %esp
mov 0x400e5e40 -> %eax
push %eax %esp -> %esp (%esp)
* We also assume all clean call instrs are expanded.
*/
/* Because the clean call might be optimized, we cannot assume the sequence.
     * We assume that the clean call will not be inlined because it has more
     * than one argument, so we scan to find either a call instr or a popf.
     * If a popf, do as before.
     * If a call, move back to right before push xbx or mov rbx => r3.
*/
if (app_flags_ok == NULL)
app_flags_ok = instrlist_first(ilist);
/* r2065 added out-of-line clean call context switch, so we need to check
* how the context switch code is inserted.
*/
while (!instr_opcode_valid(app_flags_ok) ||
instr_get_opcode(app_flags_ok) != OP_call) {
app_flags_ok = instr_get_next(app_flags_ok);
CLIENT_ASSERT(app_flags_ok != NULL,
"dr_insert_cbr_instrumentation: cannot find call instr");
if (instr_get_opcode(app_flags_ok) == OP_popf)
break;
}
if (instr_get_opcode(app_flags_ok) == OP_call) {
if (opnd_get_pc(instr_get_target(app_flags_ok)) == (app_pc)callee) {
/* call to clean callee
* move a few instrs back till right before push xbx, or mov rbx => r3
*/
while (app_flags_ok != NULL) {
if (instr_reg_in_src(app_flags_ok, DR_REG_XBX))
break;
app_flags_ok = instr_get_prev(app_flags_ok);
}
} else {
/* call to clean call context save */
ASSERT(opnd_get_pc(instr_get_target(app_flags_ok)) ==
get_clean_call_save(dcontext _IF_X64(GENCODE_X64)));
out_of_line_switch = true;
}
ASSERT(app_flags_ok != NULL);
}
/* i#1155: for out-of-line context switch
* we insert two parts of code to setup "taken" arg for clean call:
* - compute "taken" and put it onto the stack right before call to context
* save, where DR already swapped stack and adjusted xsp to point beyond
* mcontext plus temp stack size.
* It is 2 slots away b/c 1st is retaddr.
* - move the "taken" from stack to ebx to compatible with existing code
* right after context save returns and before arg setup, where xsp
* points beyond mcontext (xref emit_clean_call_save).
* It is 2 slots + temp stack size away.
* XXX: we could optimize the code by computing "taken" after clean call
* save if the eflags are not cleared.
*/
/* put our code before the popf or use of xbx */
opc = instr_get_opcode(instr);
if (opc == OP_jecxz || opc == OP_loop || opc == OP_loope || opc == OP_loopne) {
/* for 8-bit cbrs w/ multiple conditions and state, simpler to
* simply execute them -- they're rare so shouldn't be a perf hit.
* after all, ecx is saved, can clobber it.
* we do:
* loop/jecxz taken
* not_taken: mov 0, ebx
* jmp done
* taken: mov 1, ebx
* done:
*/
opnd_t opnd_taken = out_of_line_switch ?
/* 2 slots away from xsp, xref comment above for i#1155 */
OPND_CREATE_MEM32(REG_XSP, -2*(int)XSP_SZ /* ret+taken */) :
opnd_create_reg(REG_EBX);
instr_t *branch = instr_clone(dcontext, instr);
instr_t *not_taken =
INSTR_CREATE_mov_imm(dcontext, opnd_taken,
OPND_CREATE_INT32(0));
instr_t *taken =
INSTR_CREATE_mov_imm(dcontext, opnd_taken,
OPND_CREATE_INT32(1));
instr_t *done = INSTR_CREATE_label(dcontext);
instr_set_target(branch, opnd_create_instr(taken));
/* client-added meta instrs should not have translation set */
instr_set_translation(branch, NULL);
MINSERT(ilist, app_flags_ok, branch);
MINSERT(ilist, app_flags_ok, not_taken);
MINSERT(ilist, app_flags_ok,
INSTR_CREATE_jmp_short(dcontext, opnd_create_instr(done)));
MINSERT(ilist, app_flags_ok, taken);
MINSERT(ilist, app_flags_ok, done);
if (out_of_line_switch) {
if (opc == OP_loop || opc == OP_loope || opc == OP_loopne) {
/* We executed OP_loop* before we saved xcx, so we must restore
* it. We should be able to use OP_lea b/c OP_loop* uses
* addr prefix to shrink pointer-sized xcx, not data prefix.
*/
reg_id_t xcx = opnd_get_reg(instr_get_dst(instr, 0));
MINSERT(ilist, app_flags_ok, INSTR_CREATE_lea
(dcontext, opnd_create_reg(xcx),
opnd_create_base_disp(xcx, DR_REG_NULL, 0, 1, OPSZ_lea)));
}
ASSERT(instr_get_opcode(app_flags_ok) == OP_call);
/* 2 slots + temp_stack_size away from xsp,
* xref comment above for i#1155
*/
opnd_taken = OPND_CREATE_MEM32
(REG_XSP, -2*(int)XSP_SZ-get_clean_call_temp_stack_size());
MINSERT(ilist, instr_get_next(app_flags_ok),
XINST_CREATE_load(dcontext,
opnd_create_reg(REG_EBX),
opnd_taken));
}
} else {
/* build a setcc equivalent of instr's jcc operation
* WARNING: this relies on order of OP_ enum!
*/
opnd_t opnd_taken = out_of_line_switch ?
/* 2 slots away from xsp, xref comment above for i#1155 */
OPND_CREATE_MEM8(REG_XSP, -2*(int)XSP_SZ /* ret+taken */) :
opnd_create_reg(REG_BL);
opc = instr_get_opcode(instr);
if (opc <= OP_jnle_short)
opc += (OP_jo - OP_jo_short);
CLIENT_ASSERT(opc >= OP_jo && opc <= OP_jnle,
"dr_insert_cbr_instrumentation: unknown opcode");
opc = opc - OP_jo + OP_seto;
MINSERT(ilist, app_flags_ok,
INSTR_CREATE_setcc(dcontext, opc, opnd_taken));
if (out_of_line_switch) {
app_flags_ok = instr_get_next(app_flags_ok);
/* 2 slots + temp_stack_size away from xsp,
* xref comment above for i#1155
*/
opnd_taken = OPND_CREATE_MEM8
(REG_XSP, -2*(int)XSP_SZ-get_clean_call_temp_stack_size());
}
/* movzx ebx <- bl */
MINSERT(ilist, app_flags_ok,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EBX),
opnd_taken));
}
/* now branch dir is in ebx and will be passed to clean call */
#elif defined (ARM)
/* i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
}
DR_API void
dr_insert_cbr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee)
{
dr_insert_cbr_instrumentation_help(drcontext, ilist, instr, callee,
false /* no fallthrough */, opnd_create_null());
}
DR_API void
dr_insert_cbr_instrumentation_ex(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee, opnd_t user_data)
{
dr_insert_cbr_instrumentation_help(drcontext, ilist, instr, callee,
true /* has fallthrough */, user_data);
}
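/* A minimal usage sketch (hypothetical client code, not part of this file):
 * the basic form's callee receives the branch address, its target, and the
 * taken value computed into ebx above; the _ex form's callee additionally
 * receives the fall-through address and the user_data operand.
 *
 *   static void
 *   at_cbr(app_pc instr_addr, app_pc target_addr, int taken)
 *   {
 *       // taken is 1 if the branch was taken, else 0
 *   }
 *
 *   // inside a bb event, for an instr where instr_is_cbr(instr) is true:
 *   //   dr_insert_cbr_instrumentation(drcontext, bb, instr, (void *)at_cbr);
 */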
DR_API void
dr_insert_ubr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee)
{
/* same as call */
dr_insert_call_instrumentation(drcontext, ilist, instr, callee);
}
/* This may seem like a pretty targeted API function, but there's no
* clean way for a client to do this on its own due to DR's
* restrictions on bb instrumentation (i#782).
*/
DR_API
bool
dr_clobber_retaddr_after_read(void *drcontext, instrlist_t *ilist, instr_t *instr,
ptr_uint_t value)
{
/* the client could be using note fields so we use a label and xfer to
* a note field during the mangling pass
*/
if (instr_is_return(instr)) {
instr_t *label = INSTR_CREATE_label(drcontext);
dr_instr_label_data_t *data = instr_get_label_data_area(label);
/* we could coordinate w/ drmgr and use some reserved note label value
* but only if we run out of instr flags. so we set to 0 to not
* overlap w/ any client uses (DRMGR_NOTE_NONE == 0).
*/
label->note = 0;
/* these values are read back in mangle() */
data->data[0] = (ptr_uint_t) instr;
data->data[1] = value;
label->flags |= INSTR_CLOBBER_RETADDR;
instr->flags |= INSTR_CLOBBER_RETADDR;
instrlist_meta_preinsert(ilist, instr, label);
return true;
}
return false;
}
DR_API bool
dr_mcontext_xmm_fields_valid(void)
{
return preserve_xmm_caller_saved();
}
#endif /* CLIENT_INTERFACE */
/* dr_get_mcontext() needed for translating clean call arg errors */
/* Fills in whichever of dmc or mc is non-NULL */
bool
dr_get_mcontext_priv(dcontext_t *dcontext, dr_mcontext_t *dmc, priv_mcontext_t *mc)
{
priv_mcontext_t *state;
CLIENT_ASSERT(!TEST(SELFPROT_DCONTEXT, DYNAMO_OPTION(protect_mask)),
"DR context protection NYI");
if (mc == NULL) {
CLIENT_ASSERT(dmc != NULL, "invalid context");
/* catch uses that forget to set size: perhaps in a few releases,
* when most old clients have been converted, remove this (we'll
* still return false)
*/
CLIENT_ASSERT(dmc->size == sizeof(dr_mcontext_t),
"dr_mcontext_t.size field not set properly");
CLIENT_ASSERT(dmc->flags != 0 && (dmc->flags & ~(DR_MC_ALL)) == 0,
"dr_mcontext_t.flags field not set properly");
} else
CLIENT_ASSERT(dmc == NULL, "invalid internal params");
#ifdef CLIENT_INTERFACE
/* i#117/PR 395156: support getting mcontext from events where mcontext is
* stable. It would be nice to support it from init and 1st thread init,
* but the mcontext is not available at those points.
*
* Since DR calls this routine when recreating state and wants the
* clean call version, can't distinguish by whereami=DR_WHERE_FCACHE,
* so we set a flag in the supported events. If client routine
* crashes and we recreate then we want clean call version anyway
* so should be ok. Note that we want in_pre_syscall for other
* reasons (dr_syscall_set_param() for Windows) so we keep it a
* separate flag.
*/
/* no support for init or initial thread init */
if (!dynamo_initialized)
return false;
if (dcontext->client_data->cur_mc != NULL) {
if (mc != NULL)
*mc = *dcontext->client_data->cur_mc;
else if (!priv_mcontext_to_dr_mcontext(dmc, dcontext->client_data->cur_mc))
return false;
return true;
}
if (!is_os_cxt_ptr_null(dcontext->client_data->os_cxt)) {
return os_context_to_mcontext(dmc, mc, dcontext->client_data->os_cxt);
}
if (dcontext->client_data->suspended) {
/* A thread suspended by dr_suspend_all_other_threads() has its
* context translated lazily here.
* We cache the result in cur_mc to avoid a translation cost next time.
*/
bool res;
priv_mcontext_t *mc_xl8;
if (mc != NULL)
mc_xl8 = mc;
else {
dcontext->client_data->cur_mc = (priv_mcontext_t *)
heap_alloc(dcontext, sizeof(*dcontext->client_data->cur_mc)
HEAPACCT(ACCT_CLIENT));
/* We'll clear this cache in dr_resume_all_other_threads() */
mc_xl8 = dcontext->client_data->cur_mc;
}
res = thread_get_mcontext(dcontext->thread_record, mc_xl8);
CLIENT_ASSERT(res, "failed to get mcontext of suspended thread");
res = translate_mcontext(dcontext->thread_record, mc_xl8,
false/*do not restore memory*/, NULL);
CLIENT_ASSERT(res, "failed to xl8 mcontext of suspended thread");
if (mc == NULL && !priv_mcontext_to_dr_mcontext(dmc, mc_xl8))
return false;
return true;
}
/* PR 207947: support mcontext access from syscall events */
if (dcontext->client_data->mcontext_in_dcontext ||
dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall) {
if (mc != NULL)
*mc = *get_mcontext(dcontext);
else if (!priv_mcontext_to_dr_mcontext(dmc, get_mcontext(dcontext)))
return false;
return true;
}
#endif
/* dr_prepare_for_call() puts the machine context on the dstack
* with pusha and pushf, but only fills in xmm values for
* preserve_xmm_caller_saved(): however, we tell the client that the xmm
* fields are not valid otherwise. so, we just have to copy the
* state from the dstack.
*/
state = get_priv_mcontext_from_dstack(dcontext);
if (mc != NULL)
*mc = *state;
else if (!priv_mcontext_to_dr_mcontext(dmc, state))
return false;
/* esp is a dstack value -- get the app stack's esp from the dcontext */
if (mc != NULL)
mc->xsp = get_mcontext(dcontext)->xsp;
else if (TEST(DR_MC_CONTROL, dmc->flags))
dmc->xsp = get_mcontext(dcontext)->xsp;
#ifdef ARM
if (TEST(DR_MC_INTEGER, dmc->flags)) {
/* get the stolen register's app value */
if (mc != NULL)
set_stolen_reg_val(mc, (reg_t) get_tls(os_tls_offset(TLS_REG_STOLEN_SLOT)));
else {
set_stolen_reg_val(dr_mcontext_as_priv_mcontext(dmc),
(reg_t) get_tls(os_tls_offset(TLS_REG_STOLEN_SLOT)));
}
}
#endif
/* XXX: should we set the pc field?
* If we do we'll have to adopt a different solution for i#1685 in our Windows
* hooks where today we use the pc slot for temp storage.
*/
return true;
}
DR_API bool
dr_get_mcontext(void *drcontext, dr_mcontext_t *dmc)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
return dr_get_mcontext_priv(dcontext, dmc, NULL);
}
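/* A minimal usage sketch (hypothetical client code, not part of this file):
 * callers must fill in both the size and flags fields before querying,
 * matching the asserts in dr_get_mcontext_priv() above.
 *
 *   dr_mcontext_t mc;
 *   mc.size = sizeof(mc);
 *   mc.flags = DR_MC_CONTROL | DR_MC_INTEGER;   // or DR_MC_ALL
 *   if (dr_get_mcontext(drcontext, &mc)) {
 *       // mc.xsp and the requested register fields are now valid; note that
 *       // the pc field is not filled in here (see the XXX above)
 *   }
 */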
#ifdef CLIENT_INTERFACE
DR_API bool
dr_set_mcontext(void *drcontext, dr_mcontext_t *context)
{
priv_mcontext_t *state;
dcontext_t *dcontext = (dcontext_t *)drcontext;
IF_ARM(reg_t reg_val = 0 /* silence the compiler warning */;)
CLIENT_ASSERT(!TEST(SELFPROT_DCONTEXT, DYNAMO_OPTION(protect_mask)),
"DR context protection NYI");
CLIENT_ASSERT(context != NULL, "invalid context");
CLIENT_ASSERT(context->size == sizeof(dr_mcontext_t),
"dr_mcontext_t.size field not set properly");
CLIENT_ASSERT(context->flags != 0 && (context->flags & ~(DR_MC_ALL)) == 0,
"dr_mcontext_t.flags field not set properly");
/* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
/* PR 207947: support mcontext access from syscall events */
if (dcontext->client_data->mcontext_in_dcontext ||
dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall) {
if (!dr_mcontext_to_priv_mcontext(get_mcontext(dcontext), context))
return false;
return true;
}
if (dcontext->client_data->cur_mc != NULL) {
return dr_mcontext_to_priv_mcontext(dcontext->client_data->cur_mc, context);
}
if (!is_os_cxt_ptr_null(dcontext->client_data->os_cxt)) {
/* It would be nice to fail for #DR_XFER_CALLBACK_RETURN but we'd need to
* store yet more state to do so. The pc will be ignored, and xsi
* changes will likely cause crashes.
*/
return mcontext_to_os_context(dcontext->client_data->os_cxt, context, NULL);
}
/* copy the machine context to the dstack area created with
* dr_prepare_for_call(). note that xmm0-5 copied there
* will override any save_fpstate xmm values, as desired.
*/
state = get_priv_mcontext_from_dstack(dcontext);
#ifdef ARM
if (TEST(DR_MC_INTEGER, context->flags)) {
/* Set the stolen register's app value in TLS, not on stack (we rely
* on our stolen reg retaining its value on the stack)
*/
priv_mcontext_t *mc = dr_mcontext_as_priv_mcontext(context);
set_tls(os_tls_offset(TLS_REG_STOLEN_SLOT), (void *) get_stolen_reg_val(mc));
        /* save the reg val on the stack to be clobbered by the copy below */
reg_val = get_stolen_reg_val(state);
}
#endif
if (!dr_mcontext_to_priv_mcontext(state, context))
return false;
#ifdef ARM
if (TEST(DR_MC_INTEGER, context->flags)) {
/* restore the reg val on the stack clobbered by the copy above */
set_stolen_reg_val(state, reg_val);
}
#endif
if (TEST(DR_MC_CONTROL, context->flags)) {
/* esp will be restored from a field in the dcontext */
get_mcontext(dcontext)->xsp = context->xsp;
}
/* XXX: should we support setting the pc field? */
return true;
}
DR_API
bool
dr_redirect_execution(dr_mcontext_t *mcontext)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL);
CLIENT_ASSERT(mcontext->size == sizeof(dr_mcontext_t),
"dr_mcontext_t.size field not set properly");
CLIENT_ASSERT(mcontext->flags == DR_MC_ALL,
"dr_mcontext_t.flags must be DR_MC_ALL");
/* PR 352429: squash current trace.
* FIXME: will clients use this so much that this will be a perf issue?
* samples/cbr doesn't hit this even at -trace_threshold 1
*/
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_INTERP, 1, "squashing trace-in-progress\n");
trace_abort(dcontext);
}
dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
dcontext->whereami = DR_WHERE_FCACHE;
set_last_exit(dcontext, (linkstub_t *)get_client_linkstub());
#ifdef CLIENT_INTERFACE
if (kernel_xfer_callbacks.num > 0) {
/* This can only be called from a clean call or an exception event.
* For both of those we can get the current mcontext via dr_get_mcontext()
* (the latter b/c we explicitly store to cur_mc just for this use case).
*/
dr_mcontext_t src_dmc;
src_dmc.size = sizeof(src_dmc);
src_dmc.flags = DR_MC_CONTROL | DR_MC_INTEGER;
dr_get_mcontext(dcontext, &src_dmc);
if (instrument_kernel_xfer(dcontext, DR_XFER_CLIENT_REDIRECT,
osc_empty, &src_dmc, NULL,
dcontext->next_tag, mcontext->xsp, osc_empty,
dr_mcontext_as_priv_mcontext(mcontext), 0))
dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
}
#endif
transfer_to_dispatch(dcontext, dr_mcontext_as_priv_mcontext(mcontext),
true/*full_DR_state*/);
/* on success we won't get here */
return false;
}
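/* A minimal usage sketch (hypothetical client code, not part of this file):
 * typically invoked from a clean call or exception event after obtaining and
 * editing the full context; flags must be DR_MC_ALL per the assert above.
 *
 *   dr_mcontext_t mc;
 *   mc.size = sizeof(mc);
 *   mc.flags = DR_MC_ALL;
 *   if (dr_get_mcontext(drcontext, &mc)) {
 *       mc.pc = recovery_pc;   // recovery_pc is a hypothetical app target
 *       dr_redirect_execution(&mc);
 *       // not reached on success
 *   }
 */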
DR_API
byte *
dr_redirect_native_target(void *drcontext)
{
#ifdef PROGRAM_SHEPHERDING
/* This feature is unavail for prog shep b/c of the cross-ib-type pollution,
* as well as the lack of source tag info when exiting the ibl (i#1150).
*/
return NULL;
#else
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_redirect_native_target(): drcontext cannot be NULL");
/* The client has no way to know the mode of our gencode so we set LSB here */
return PC_AS_JMP_TGT(DEFAULT_ISA_MODE, get_client_ibl_xfer_entry(dcontext));
#endif
}
/***************************************************************************
* ADAPTIVE OPTIMIZATION SUPPORT
 * *Note for non-owning thread support (i.e. sideline) all methods assume
 * the dcontext is valid; the client will have to ensure this with a lock
 * on thread_exit!!
*
* *need way for side thread to get a dcontext to use for logging and mem
* alloc, before do that should think more about mem alloc in/for adaptive
* routines
*
* *made local mem alloc by side thread safe (see heap.c)
*
 * *logging not safe if not owning thread?
*/
DR_API
/* Schedules the fragment to be deleted. Once this call is completed,
* an existing executing fragment is allowed to complete, but control
* will not enter the fragment again before it is deleted.
*
* NOTE: this comment used to say, "after deletion, control may still
* reach the fragment by indirect branch.". We believe this is now only
* true for shared fragments, which are not currently supported.
*/
bool
dr_delete_fragment(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
fragment_t *f;
bool deletable = false, waslinking;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
CLIENT_ASSERT(!SHARED_FRAGMENTS_ENABLED(),
"dr_delete_fragment() only valid with -thread_private");
CLIENT_ASSERT(drcontext != NULL, "dr_delete_fragment(): drcontext cannot be NULL");
/* i#1989: there's no easy way to get a translation without a proper dcontext */
CLIENT_ASSERT(!fragment_thread_exited(dcontext),
"dr_delete_fragment not supported from the thread exit event");
if (fragment_thread_exited(dcontext))
return false;
waslinking = is_couldbelinking(dcontext);
if (!waslinking)
enter_couldbelinking(dcontext, NULL, false);
#ifdef CLIENT_SIDELINE
mutex_lock(&(dcontext->client_data->sideline_mutex));
fragment_get_fragment_delete_mutex(dcontext);
#else
CLIENT_ASSERT(drcontext == get_thread_private_dcontext(),
"dr_delete_fragment(): drcontext does not belong to current thread");
#endif
f = fragment_lookup(dcontext, tag);
if (f != NULL && (f->flags & FRAG_CANNOT_DELETE) == 0) {
client_todo_list_t * todo = HEAP_TYPE_ALLOC(dcontext, client_todo_list_t,
ACCT_CLIENT, UNPROTECTED);
client_todo_list_t * iter = dcontext->client_data->to_do;
todo->next = NULL;
todo->ilist = NULL;
todo->tag = tag;
if (iter == NULL)
dcontext->client_data->to_do = todo;
else {
while (iter->next != NULL)
iter = iter->next;
iter->next = todo;
}
deletable = true;
/* unlink fragment so will return to dynamo and delete.
* Do not remove the fragment from the hashtable --
* we need to be able to look up the fragment when
* inspecting the to_do list in dispatch.
*/
if ((f->flags & FRAG_LINKED_INCOMING) != 0)
unlink_fragment_incoming(dcontext, f);
fragment_remove_from_ibt_tables(dcontext, f, false);
}
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
mutex_unlock(&(dcontext->client_data->sideline_mutex));
#endif
if (!waslinking)
enter_nolinking(dcontext, NULL, false);
return deletable;
}
DR_API
/* Schedules the fragment at 'tag' for replacement. Once this call is
* completed, an existing executing fragment is allowed to complete,
* but control will not enter the fragment again before it is replaced.
*
* NOTE: this comment used to say, "after replacement, control may still
* reach the fragment by indirect branch.". We believe this is now only
* true for shared fragments, which are not currently supported.
*
* Takes control of the ilist and all responsibility for deleting it and the
* instrs inside of it. The client should not keep, use, reference, etc. the
* instrlist or any of the instrs it contains after they are passed in.
*/
bool
dr_replace_fragment(void *drcontext, void *tag, instrlist_t *ilist)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
bool frag_found, waslinking;
fragment_t * f;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
CLIENT_ASSERT(!SHARED_FRAGMENTS_ENABLED(),
"dr_replace_fragment() only valid with -thread_private");
CLIENT_ASSERT(drcontext != NULL, "dr_replace_fragment(): drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_replace_fragment: drcontext is invalid");
/* i#1989: there's no easy way to get a translation without a proper dcontext */
CLIENT_ASSERT(!fragment_thread_exited(dcontext),
"dr_replace_fragment not supported from the thread exit event");
if (fragment_thread_exited(dcontext))
return false;
waslinking = is_couldbelinking(dcontext);
if (!waslinking)
enter_couldbelinking(dcontext, NULL, false);
#ifdef CLIENT_SIDELINE
mutex_lock(&(dcontext->client_data->sideline_mutex));
fragment_get_fragment_delete_mutex(dcontext);
#else
CLIENT_ASSERT(drcontext == get_thread_private_dcontext(),
"dr_replace_fragment(): drcontext does not belong to current thread");
#endif
f = fragment_lookup(dcontext, tag);
frag_found = (f != NULL);
if (frag_found) {
client_todo_list_t * iter = dcontext->client_data->to_do;
client_todo_list_t * todo = HEAP_TYPE_ALLOC(dcontext, client_todo_list_t,
ACCT_CLIENT, UNPROTECTED);
todo->next = NULL;
todo->ilist = ilist;
todo->tag = tag;
if (iter == NULL)
dcontext->client_data->to_do = todo;
else {
while (iter->next != NULL)
iter = iter->next;
iter->next = todo;
}
/* unlink fragment so will return to dynamo and replace for next time
         * it's executed
*/
if ((f->flags & FRAG_LINKED_INCOMING) != 0)
unlink_fragment_incoming(dcontext, f);
fragment_remove_from_ibt_tables(dcontext, f, false);
}
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
mutex_unlock(&(dcontext->client_data->sideline_mutex));
#endif
if (!waslinking)
enter_nolinking(dcontext, NULL, false);
return frag_found;
}
#ifdef UNSUPPORTED_API
/* FIXME - doesn't work with shared fragments. Consider removing since dr_flush_region
* and dr_delay_flush_region give us most of this functionality. */
DR_API
/* Flushes all fragments containing 'flush_tag', or the entire code
* cache if flush_tag is NULL. 'curr_tag' must specify the tag of the
* currently-executing fragment. If curr_tag is NULL, flushing can be
* delayed indefinitely. Note that flushing is performed across all
* threads, but other threads may continue to execute fragments
* containing 'curr_tag' until those fragments finish.
*/
void dr_flush_fragments(void *drcontext, void *curr_tag, void *flush_tag)
{
client_flush_req_t *iter, *flush;
dcontext_t *dcontext = (dcontext_t *)drcontext;
/* We want to unlink the currently executing fragment so we'll
* force a context switch to DR. That way, we'll perform the
* flush as soon as possible. Unfortunately, the client may not
* know the tag of the current trace. Therefore, we unlink all
* fragments in the region.
*
* Note that we aren't unlinking or ibl-invalidating (i.e., making
* unreachable) any fragments in other threads containing curr_tag
* until the delayed flush happens in enter_nolinking().
*/
if (curr_tag != NULL)
vm_area_unlink_incoming(dcontext, (app_pc)curr_tag);
flush = HEAP_TYPE_ALLOC(dcontext, client_flush_req_t, ACCT_CLIENT, UNPROTECTED);
flush->flush_callback = NULL;
if (flush_tag == NULL) {
flush->start = UNIVERSAL_REGION_BASE;
flush->size = UNIVERSAL_REGION_SIZE;
} else {
flush->start = (app_pc)flush_tag;
flush->size = 1;
}
flush->next = NULL;
iter = dcontext->client_data->flush_list;
if (iter == NULL) {
dcontext->client_data->flush_list = flush;
}
else {
while (iter->next != NULL)
iter = iter->next;
iter->next = flush;
}
}
#endif /* UNSUPPORTED_API */
DR_API
/* Flush all fragments that contain code from the region [start, start+size).
* Uses a synchall flush to guarantee that no execution occurs out of the fragments
* flushed once this returns. Requires caller to be holding no locks (dr or client) and
* to be !couldbelinking (xref PR 199115, 227619). Caller must use
* dr_redirect_execution() to return to the cache. */
bool
dr_flush_region(app_pc start, size_t size)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL);
LOG(THREAD, LOG_FRAGMENT, 2, "%s: "PFX"-"PFX"\n", __FUNCTION__, start, start+size);
/* Flush requires !couldbelinking. FIXME - not all event callbacks to the client are
* !couldbelinking (see PR 227619) restricting where this routine can be used. */
CLIENT_ASSERT(!is_couldbelinking(dcontext), "dr_flush_region: called from an event "
"callback that doesn't support calling this routine; see header file "
"for restrictions.");
/* Flush requires caller to hold no locks that might block a couldbelinking thread
* (which includes almost all dr locks). FIXME - some event callbacks are holding
* dr locks (see PR 227619) so can't call this routine. Since we are going to use
* a synchall flush, holding client locks is disallowed too (could block a thread
* at an unsafe spot for synch). */
CLIENT_ASSERT(OWN_NO_LOCKS(dcontext), "dr_flush_region: caller owns a client "
"lock or was called from an event callback that doesn't support "
"calling this routine; see header file for restrictions.");
CLIENT_ASSERT(size != 0, "dr_flush_region: 0 is invalid size for flush");
/* release build check of requirements, as many as possible at least */
if (size == 0 || is_couldbelinking(dcontext))
return false;
if (!executable_vm_area_executed_from(start, start + size))
return true;
flush_fragments_from_region(dcontext, start, size, true/*force synchall*/);
return true;
}
DR_API
/* Flush all fragments that contain code from the region [start, start+size).
* Uses an unlink flush which guarantees that no thread will enter a fragment that was
* flushed once this returns (threads already in a flushed fragment will continue).
* Requires caller to be holding no locks (dr or client) and to be !couldbelinking
* (xref PR 199115, 227619). */
bool
dr_unlink_flush_region(app_pc start, size_t size)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL);
LOG(THREAD, LOG_FRAGMENT, 2, "%s: "PFX"-"PFX"\n", __FUNCTION__, start, start+size);
/* This routine won't work with coarse_units */
CLIENT_ASSERT(!DYNAMO_OPTION(coarse_units),
/* as of now, coarse_units are always disabled with -thread_private. */
"dr_unlink_flush_region is not supported with -opt_memory unless "
"-thread_private or -enable_full_api is also specified");
/* Flush requires !couldbelinking. FIXME - not all event callbacks to the client are
* !couldbelinking (see PR 227619) restricting where this routine can be used. */
CLIENT_ASSERT(!is_couldbelinking(dcontext), "dr_flush_region: called from an event "
"callback that doesn't support calling this routine, see header file "
"for restrictions.");
/* Flush requires caller to hold no locks that might block a couldbelinking thread
* (which includes almost all dr locks). FIXME - some event callbacks are holding
* dr locks (see PR 227619) so can't call this routine. FIXME - some event callbacks
* are couldbelinking (see PR 227619) so can't allow the caller to hold any client
* locks that could block threads in one of those events (otherwise we don't need
* to care about client locks) */
CLIENT_ASSERT(OWN_NO_LOCKS(dcontext), "dr_flush_region: caller owns a client "
"lock or was called from an event callback that doesn't support "
"calling this routine, see header file for restrictions.");
CLIENT_ASSERT(size != 0, "dr_unlink_flush_region: 0 is invalid size for flush");
/* release build check of requirements, as many as possible at least */
if (size == 0 || is_couldbelinking(dcontext))
return false;
if (!executable_vm_area_executed_from(start, start + size))
return true;
flush_fragments_from_region(dcontext, start, size, false/*don't force synchall*/);
return true;
}
DR_API
/* Flush all fragments that contain code from the region [start, start+size) at the next
* convenient time. Unlike dr_flush_region() this routine has no restrictions on lock
* or couldbelinking status; the downside is that the delay till the flush actually
* occurs is unbounded (FIXME - we could do something safely here to try to speed it
* up like unlinking shared_syscall etc.), but should occur before any new code is
* executed or any nudges are processed. */
bool
dr_delay_flush_region(app_pc start, size_t size, uint flush_id,
void (*flush_completion_callback) (int flush_id))
{
client_flush_req_t *flush;
LOG(THREAD_GET, LOG_FRAGMENT, 2, "%s: "PFX"-"PFX"\n",
__FUNCTION__, start, start+size);
if (size == 0) {
CLIENT_ASSERT(false, "dr_delay_flush_region: 0 is invalid size for flush");
return false;
}
/* With the new module load event at 1st execution (i#884), we get a lot of
* flush requests during creation of a bb from things like drwrap_replace().
* To avoid them flushing from a new module we check overlap up front here.
*/
if (!executable_vm_area_executed_from(start, start+size)) {
return true;
}
/* FIXME - would be nice if we could check the requirements and call
* dr_unlink_flush_region() here if it's safe. Is difficult to detect non-dr locks
* that could block a couldbelinking thread though. */
flush = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, client_flush_req_t, ACCT_CLIENT,
UNPROTECTED);
memset(flush, 0x0, sizeof(client_flush_req_t));
flush->start = (app_pc)start;
flush->size = size;
flush->flush_id = flush_id;
flush->flush_callback = flush_completion_callback;
mutex_lock(&client_flush_request_lock);
flush->next = client_flush_requests;
client_flush_requests = flush;
mutex_unlock(&client_flush_request_lock);
return true;
}
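/* A minimal usage sketch (hypothetical client code, not part of this file):
 * a client that observes code being unloaded or modified can request a
 * delayed flush from any context, since this routine has no lock or
 * couldbelinking restrictions.
 *
 *   static void
 *   flush_done(int flush_id)
 *   {
 *       // invoked once the flush identified by flush_id has completed
 *   }
 *
 *   // modified_start and modified_size are hypothetical client values:
 *   //   dr_delay_flush_region(modified_start, modified_size, 0, flush_done);
 */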
DR_API
/* returns whether or not there is a fragment in the drcontext fcache at tag
*/
bool
dr_fragment_exists_at(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
fragment_t *f;
#ifdef CLIENT_SIDELINE
fragment_get_fragment_delete_mutex(dcontext);
#endif
f = fragment_lookup(dcontext, tag);
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
#endif
return f != NULL;
}
DR_API
bool
dr_bb_exists_at(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
fragment_t *f = fragment_lookup(dcontext, tag);
if (f != NULL && !TEST(FRAG_IS_TRACE, f->flags)) {
return true;
}
return false;
}
DR_API
/* Looks up the fragment associated with the application pc tag.
* If not found, returns 0.
* If found, returns the total size occupied in the cache by the fragment.
*/
uint
dr_fragment_size(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
fragment_t *f;
int size = 0;
CLIENT_ASSERT(drcontext != NULL, "dr_fragment_size: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_fragment_size: drcontext is invalid");
#ifdef CLIENT_SIDELINE
/* used to check to see if owning thread, if so don't need lock */
    /* but the check for owning thread more expensive than just getting lock */
/* to check if owner get_thread_id() == dcontext->owning_thread */
fragment_get_fragment_delete_mutex(dcontext);
#endif
f = fragment_lookup(dcontext, tag);
if (f == NULL)
size = 0;
else
size = f->size;
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
#endif
return size;
}
DR_API
/* Retrieves the application PC of a fragment */
app_pc
dr_fragment_app_pc(void *tag)
{
#ifdef WINDOWS
tag = get_app_pc_from_intercept_pc_if_necessary((app_pc)tag);
CLIENT_ASSERT(tag != NULL, "dr_fragment_app_pc shouldn't be NULL");
DODEBUG({
/* Without -hide our DllMain routine ends up in the cache (xref PR 223120).
* On Linux fini() ends up in the cache.
*/
if (DYNAMO_OPTION(hide) && is_dynamo_address(tag) &&
/* support client interpreting code out of its library */
!is_in_client_lib(tag)) {
/* downgraded from assert for client interpreting its own generated code */
SYSLOG_INTERNAL_WARNING_ONCE("dr_fragment_app_pc is a DR/client pc");
}
});
#elif defined(LINUX) && defined(X86_32)
/* Point back at our hook, undoing the bb shift for SA_RESTART (i#2659). */
if ((app_pc)tag == vsyscall_sysenter_displaced_pc)
tag = vsyscall_sysenter_return_pc;
#endif
return tag;
}
DR_API
/* i#268: opposite of dr_fragment_app_pc() */
app_pc
dr_app_pc_for_decoding(app_pc pc)
{
#ifdef WINDOWS
app_pc displaced;
if (is_intercepted_app_pc(pc, &displaced))
return displaced;
#endif
return pc;
}
DR_API
app_pc
dr_app_pc_from_cache_pc(byte *cache_pc)
{
app_pc res = NULL;
dcontext_t *dcontext = get_thread_private_dcontext();
bool waslinking;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL);
/* i#1989: there's no easy way to get a translation without a proper dcontext */
CLIENT_ASSERT(!fragment_thread_exited(dcontext),
"dr_app_pc_from_cache_pc not supported from the thread exit event");
if (fragment_thread_exited(dcontext))
return NULL;
waslinking = is_couldbelinking(dcontext);
if (!waslinking)
enter_couldbelinking(dcontext, NULL, false);
/* suppress asserts about faults in meta instrs */
DODEBUG({ dcontext->client_data->is_translating = true; });
res = recreate_app_pc(dcontext, cache_pc, NULL);
DODEBUG({ dcontext->client_data->is_translating = false; });
if (!waslinking)
enter_nolinking(dcontext, NULL, false);
return res;
}
DR_API
bool
dr_using_app_state(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
return os_using_app_state(dcontext);
}
DR_API
void
dr_switch_to_app_state(void *drcontext)
{
dr_switch_to_app_state_ex(drcontext, DR_STATE_ALL);
}
DR_API
void
dr_switch_to_app_state_ex(void *drcontext, dr_state_flags_t flags)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
os_swap_context(dcontext, true/*to app*/, flags);
}
DR_API
void
dr_switch_to_dr_state(void *drcontext)
{
dr_switch_to_dr_state_ex(drcontext, DR_STATE_ALL);
}
DR_API
void
dr_switch_to_dr_state_ex(void *drcontext, dr_state_flags_t flags)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
os_swap_context(dcontext, false/*to dr*/, flags);
}
/***************************************************************************
* CUSTOM TRACES SUPPORT
* *could use a method to unmark a trace head, would be nice if DR
* notified the client when it marked a trace head and gave the client a
* chance to override its decision
*/
DR_API
/* Marks the fragment associated with the application pc tag as
* a trace head. The fragment need not exist yet -- once it is
* created it will be marked as a trace head.
*
* DR associates a counter with a trace head and once it
* passes the -hot_threshold parameter, DR begins building
* a trace. Before each fragment is added to the trace, DR
* calls the client routine dr_end_trace to determine whether
* to end the trace. (dr_end_trace will be called both for
* standard DR traces and for client-defined traces.)
*
* Note, some fragments are unsuitable for trace heads. DR will
* ignore attempts to mark such fragments as trace heads and will return
* false. If the client marks a fragment that doesn't exist yet as a trace
* head and DR later determines that the fragment is unsuitable for
* a trace head it will unmark the fragment as a trace head without
* notifying the client.
*
* Returns true if the target fragment is marked as a trace head.
*
* If coarse, headness depends on path: currently this will only have
* links from tag's coarse unit unlinked.
*/
bool /* FIXME: dynamorio_app_init returns an int! */
dr_mark_trace_head(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
fragment_t *f;
fragment_t coarse_f;
bool success = true;
CLIENT_ASSERT(drcontext != NULL, "dr_mark_trace_head: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_mark_trace_head: drcontext is invalid");
/* Required to make the future-fragment lookup and add atomic and for
* mark_trace_head. We have to grab before fragment_delete_mutex so
* we pay the cost of acquiring up front even when f->flags doesn't
* require it.
*/
SHARED_FLAGS_RECURSIVE_LOCK(FRAG_SHARED, acquire, change_linking_lock);
#ifdef CLIENT_SIDELINE
/* used to check to see if owning thread, if so don't need lock */
    /* but the check for owning thread more expensive than just getting lock */
/* to check if owner get_thread_id() == dcontext->owning_thread */
fragment_get_fragment_delete_mutex(dcontext);
#endif
f = fragment_lookup_fine_and_coarse(dcontext, tag, &coarse_f, NULL);
if (f == NULL) {
future_fragment_t *fut;
fut = fragment_lookup_future(dcontext, tag);
if (fut == NULL) {
/* need to create a future fragment */
fut = fragment_create_and_add_future(dcontext, tag, FRAG_IS_TRACE_HEAD);
} else {
/* don't call mark_trace_head, it will try to do some linking */
fut->flags |= FRAG_IS_TRACE_HEAD;
}
#ifndef CLIENT_SIDELINE
LOG(THREAD, LOG_MONITOR, 2,
"Client mark trace head : will mark fragment as trace head when built "
": address "PFX"\n", tag);
#endif
} else {
/* check precluding conditions */
if (TEST(FRAG_IS_TRACE, f->flags)) {
#ifndef CLIENT_SIDELINE
LOG(THREAD, LOG_MONITOR, 2,
"Client mark trace head : not marking as trace head, is already "
"a trace : address "PFX"\n", tag);
#endif
success = false;
} else if (TEST(FRAG_CANNOT_BE_TRACE, f->flags)) {
#ifndef CLIENT_SIDELINE
LOG(THREAD, LOG_MONITOR, 2,
"Client mark trace head : not marking as trace head, particular "
"fragment cannot be trace head : address "PFX"\n", tag);
#endif
success = false;
} else if (TEST(FRAG_IS_TRACE_HEAD, f->flags)) {
#ifndef CLIENT_SIDELINE
LOG(THREAD, LOG_MONITOR, 2,
"Client mark trace head : fragment already marked as trace head : "
"address "PFX"\n", tag);
#endif
success = true;
} else {
mark_trace_head(dcontext, f, NULL, NULL);
#ifndef CLIENT_SIDELINE
LOG(THREAD, LOG_MONITOR, 3,
"Client mark trace head : just marked as trace head : address "PFX"\n",
tag);
#endif
}
}
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
#endif
SHARED_FLAGS_RECURSIVE_LOCK(FRAG_SHARED, release, change_linking_lock);
return success;
}
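/* A minimal usage sketch (hypothetical client code, not part of this file):
 * a client building custom traces can mark likely-hot targets as trace heads
 * from its bb event; DR then applies its usual -hot_threshold counting before
 * consulting the client's end-trace event while building the trace.
 *
 *   static dr_emit_flags_t
 *   event_bb(void *drcontext, void *tag, instrlist_t *bb, bool for_trace,
 *            bool translating)
 *   {
 *       if (client_considers_hot(tag))   // hypothetical client heuristic
 *           dr_mark_trace_head(drcontext, tag);
 *       return DR_EMIT_DEFAULT;
 *   }
 */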
DR_API
/* Checks to see if the fragment (or future fragment) in the drcontext
* fcache at tag is marked as a trace head
*/
bool
dr_trace_head_at(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
fragment_t *f;
bool trace_head;
#ifdef CLIENT_SIDELINE
fragment_get_fragment_delete_mutex(dcontext);
#endif
f = fragment_lookup(dcontext, tag);
if (f != NULL)
trace_head = (f->flags & FRAG_IS_TRACE_HEAD) != 0;
else {
future_fragment_t *fut = fragment_lookup_future(dcontext, tag);
if (fut != NULL)
trace_head = (fut->flags & FRAG_IS_TRACE_HEAD) != 0;
else
trace_head = false;
}
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
#endif
return trace_head;
}
DR_API
/* checks to see if there is a trace in the drcontext fcache at tag
*/
bool
dr_trace_exists_at(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
fragment_t *f;
bool trace;
#ifdef CLIENT_SIDELINE
fragment_get_fragment_delete_mutex(dcontext);
#endif
f = fragment_lookup(dcontext, tag);
if (f != NULL)
trace = (f->flags & FRAG_IS_TRACE) != 0;
else
trace = false;
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
#endif
return trace;
}
#ifdef UNSUPPORTED_API
DR_API
/* All basic blocks created after this routine is called will have a prefix
* that restores the ecx register. Exit ctis can be made to target this prefix
* instead of the normal entry point by using the instr_branch_set_prefix_target()
* routine.
* WARNING: this routine should almost always be called during client
* initialization, since having a mixture of prefixed and non-prefixed basic
* blocks can lead to trouble.
*/
void
dr_add_prefixes_to_basic_blocks(void)
{
if (DYNAMO_OPTION(coarse_units)) {
/* coarse_units doesn't support prefixes in general.
* the variation by addr prefix according to processor type
* is also not stored in pcaches.
*/
CLIENT_ASSERT(false,
"dr_add_prefixes_to_basic_blocks() not supported with -opt_memory");
}
options_make_writable();
dynamo_options.bb_prefixes = true;
options_restore_readonly();
}
#endif /* UNSUPPORTED_API */
DR_API
/* Insert code to get the segment base address pointed at by seg into
* register reg. In Linux, it is only supported with -mangle_app_seg option.
* In Windows, it only supports getting base address of the TLS segment.
*/
bool
dr_insert_get_seg_base(void *drcontext, instrlist_t *ilist, instr_t *instr,
reg_id_t seg, reg_id_t reg)
{
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"dr_insert_get_seg_base: reg has wrong size\n");
#ifdef X86
CLIENT_ASSERT(reg_is_segment(seg),
"dr_insert_get_seg_base: seg is not a segment register");
# ifdef UNIX
CLIENT_ASSERT(INTERNAL_OPTION(mangle_app_seg),
"dr_insert_get_seg_base is supported"
"with -mangle_app_seg only");
/* FIXME: we should remove the constraint below by always mangling SEG_TLS,
* 1. Getting TLS base could be a common request by clients.
* 2. The TLS descriptor setup and selector setup can be separated,
* so we must intercept all descriptor setup. It will not be large
* runtime overhead for keeping track of the app's TLS segment base.
*/
CLIENT_ASSERT(INTERNAL_OPTION(private_loader) || seg != SEG_TLS,
"dr_insert_get_seg_base supports TLS seg"
"only with -private_loader");
if (!INTERNAL_OPTION(mangle_app_seg) ||
!(INTERNAL_OPTION(private_loader) || seg != SEG_TLS))
return false;
if (seg == SEG_FS || seg == SEG_GS) {
instrlist_meta_preinsert
(ilist, instr,
instr_create_restore_from_tls(drcontext, reg,
os_get_app_tls_base_offset(seg)));
} else {
instrlist_meta_preinsert
(ilist, instr,
INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(reg),
OPND_CREATE_INTPTR(0)));
}
# else /* Windows */
if (seg == SEG_TLS) {
instrlist_meta_preinsert
(ilist, instr,
XINST_CREATE_load(drcontext,
opnd_create_reg(reg),
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, SELF_TIB_OFFSET, OPSZ_PTR)));
} else if (seg == SEG_CS || seg == SEG_DS || seg == SEG_ES || seg == SEG_SS) {
/* XXX: we assume flat address space */
instrlist_meta_preinsert
(ilist, instr,
INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(reg),
OPND_CREATE_INTPTR(0)));
} else
return false;
# endif /* UNIX/Windows */
#elif defined (ARM)
/* i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
return true;
}
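/* A minimal usage sketch (hypothetical client code, not part of this file):
 * e.g., to materialize the app's TLS segment base into xax on Linux x86
 * (requires -mangle_app_seg per the asserts above; whether fs or gs holds the
 * app TLS depends on the bitwidth).
 *
 *   // inside a bb event, before instruction "where":
 *   //   dr_insert_get_seg_base(drcontext, bb, where, SEG_FS, DR_REG_XAX);
 */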
DR_API
reg_id_t
dr_get_stolen_reg()
{
return IF_X86_ELSE(REG_NULL, dr_reg_stolen);
}
DR_API
bool
dr_insert_get_stolen_reg_value(void *drcontext, instrlist_t *ilist,
instr_t *instr, reg_id_t reg)
{
IF_X86(CLIENT_ASSERT(false, "dr_insert_get_stolen_reg: should not be reached\n"));
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"dr_insert_get_stolen_reg: reg has wrong size\n");
CLIENT_ASSERT(!reg_is_stolen(reg),
"dr_insert_get_stolen_reg: reg is used by DynamoRIO\n");
#ifdef AARCHXX
instrlist_meta_preinsert
(ilist, instr,
instr_create_restore_from_tls(drcontext, reg, TLS_REG_STOLEN_SLOT));
#endif
return true;
}
DR_API
bool
dr_insert_set_stolen_reg_value(void *drcontext, instrlist_t *ilist,
instr_t *instr, reg_id_t reg)
{
IF_X86(CLIENT_ASSERT(false, "dr_insert_set_stolen_reg: should not be reached\n"));
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"dr_insert_set_stolen_reg: reg has wrong size\n");
CLIENT_ASSERT(!reg_is_stolen(reg),
"dr_insert_set_stolen_reg: reg is used by DynamoRIO\n");
#ifdef AARCHXX
instrlist_meta_preinsert
(ilist, instr,
instr_create_save_to_tls(drcontext, reg, TLS_REG_STOLEN_SLOT));
#endif
return true;
}
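/* A minimal usage sketch (hypothetical client code, not part of this file):
 * on AArch32/AArch64 a client that needs the app's value of the register DR
 * steals must go through these helpers rather than reading the register
 * directly.
 *
 *   reg_id_t stolen = dr_get_stolen_reg();   // DR_REG_NULL on x86
 *   if (stolen != DR_REG_NULL) {
 *       // materialize the app value of the stolen reg into r0 before "where"
 *       dr_insert_get_stolen_reg_value(drcontext, ilist, where, DR_REG_R0);
 *   }
 */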
DR_API
int
dr_remove_it_instrs(void *drcontext, instrlist_t *ilist)
{
#if !defined(ARM)
return 0;
#else
int res = 0;
instr_t *inst, *next;
for (inst = instrlist_first(ilist); inst != NULL; inst = next) {
next = instr_get_next(inst);
if (instr_get_opcode(inst) == OP_it) {
res++;
instrlist_remove(ilist, inst);
instr_destroy(drcontext, inst);
}
}
return res;
#endif
}
DR_API
int
dr_insert_it_instrs(void *drcontext, instrlist_t *ilist)
{
#if !defined(ARM)
return 0;
#else
instr_t *first = instrlist_first(ilist);
if (first == NULL || instr_get_isa_mode(first) != DR_ISA_ARM_THUMB)
return 0;
return reinstate_it_blocks((dcontext_t*)drcontext, ilist,
instrlist_first(ilist), NULL);
#endif
}
DR_API
bool
dr_prepopulate_cache(app_pc *tags, size_t tags_count)
{
/* We expect get_thread_private_dcontext() to return NULL b/c we're between
* dr_app_setup() and dr_app_start() and are considered a "native" thread
* with disabled TLS. We do set up TLS as too many routines fail (e.g.,
* clean call analysis) with NULL from TLS, but we do not set up signal
* handling: the caller has to handle decode faults, as we do not
* want to enable our signal handlers, which might disrupt the app running
* natively in parallel with us.
*/
thread_record_t *tr = thread_lookup(get_thread_id());
dcontext_t *dcontext = tr->dcontext;
uint i;
if (dcontext == NULL)
return false;
SHARED_BB_LOCK();
SYSLOG_INTERNAL_INFO("pre-building code cache from %d tags", tags_count);
#ifdef UNIX
os_swap_context(dcontext, false/*to dr*/, DR_STATE_GO_NATIVE);
#endif
for (i = 0; i < tags_count; i++) {
/* There could be duplicates if sthg was deleted and re-added during profiling */
fragment_t coarse_f;
fragment_t *f;
#ifdef UNIX
/* We silently skip DR-segment-reading addresses to help out a caller
* who sampled and couldn't avoid self-sampling for decoding.
*/
if (is_DR_segment_reader_entry(tags[i]))
continue;
#endif
f = fragment_lookup_fine_and_coarse(dcontext, tags[i], &coarse_f, NULL);
if (f == NULL) {
/* For coarse-grain we won't link as that's done during execution,
* but for fine-grained this should produce a fully warmed cache.
*/
f = build_basic_block_fragment(dcontext, tags[i],
0, true/*link*/, true/*visible*/
_IF_CLIENT(false/*!for_trace*/)
_IF_CLIENT(NULL));
}
ASSERT(f != NULL);
/* We're ok making a thread-private fragment: might be a waste if this
* thread never runs it, but simpler than trying to skip them or sthg.
*/
}
#ifdef UNIX
os_swap_context(dcontext, true/*to app*/, DR_STATE_GO_NATIVE);
#endif
SHARED_BB_UNLOCK();
return true;
}
/***************************************************************************
* PERSISTENCE
*/
/* Up to caller to synchronize. */
uint
instrument_persist_ro_size(dcontext_t *dcontext, void *perscxt, size_t file_offs)
{
size_t sz = 0;
size_t i;
/* Store the set of clients in use as we require the same set in order
* to validate the pcache on use. Note that we can't just have -client_lib
* be OP_PCACHE_GLOBAL b/c it contains client options too.
* We have no unique guids for clients so we store the full path.
* We ignore ids. We do care about priority order: clients must
* be in the same order in addition to having the same path.
*
* XXX: we could go further and store client library checksum, etc. hashes,
* but that precludes clients from doing their own proper versioning.
*
* XXX: we could also put the set of clients into the pcache namespace to allow
* simultaneous use of pcaches with different sets of clients (empty set
* vs under tool, in particular): but doesn't really seem useful enough
* for the trouble
*/
for (i=0; i<num_client_libs; i++) {
sz += strlen(client_libs[i].path) + 1/*NULL*/;
}
sz++; /* double NULL ends it */
/* Now for clients' own data.
* For user_data, we assume each sequence of <size, patch, persist> is
* atomic: caller holds a mutex across the sequence. Thus, we can use
* global storage.
*/
if (persist_ro_size_callbacks.num > 0) {
call_all_ret(sz, +=, , persist_ro_size_callbacks,
size_t (*)(void *, void *, size_t, void **),
(void *)dcontext, perscxt, file_offs + sz,
&persist_user_data[idx]);
}
/* using size_t for API w/ clients in case we want to widen in future */
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large");
return (uint) sz;
}
/* Up to caller to synchronize.
* Returns true iff all writes succeeded.
*/
bool
instrument_persist_ro(dcontext_t *dcontext, void *perscxt, file_t fd)
{
bool res = true;
size_t i;
char nul = '\0';
ASSERT(fd != INVALID_FILE);
for (i=0; i<num_client_libs; i++) {
size_t sz = strlen(client_libs[i].path) + 1/*NULL*/;
if (os_write(fd, client_libs[i].path, sz) != (ssize_t)sz)
return false;
}
/* double NULL ends it */
if (os_write(fd, &nul, sizeof(nul)) != (ssize_t)sizeof(nul))
return false;
/* Now for clients' own data */
if (persist_ro_size_callbacks.num > 0) {
call_all_ret(res, = res &&, , persist_ro_callbacks,
bool (*)(void *, void *, file_t, void *),
(void *)dcontext, perscxt, fd, persist_user_data[idx]);
}
return res;
}
/* Returns true if successfully validated and de-serialized */
bool
instrument_resurrect_ro(dcontext_t *dcontext, void *perscxt, byte *map)
{
bool res = true;
size_t i;
const char *c;
ASSERT(map != NULL);
/* Ensure we have the same set of tools (see comments above) */
i = 0;
c = (const char *) map;
while (*c != '\0') {
if (i >= num_client_libs)
return false; /* too many clients */
if (strcmp(client_libs[i].path, c) != 0)
return false; /* client path mismatch */
c += strlen(c) + 1;
i++;
}
if (i < num_client_libs)
return false; /* too few clients */
c++;
/* Now for clients' own data */
if (resurrect_ro_callbacks.num > 0) {
call_all_ret(res, = res &&, , resurrect_ro_callbacks,
bool (*)(void *, void *, byte **),
(void *)dcontext, perscxt, (byte **) &c);
}
return res;
}
/* Up to caller to synchronize. */
uint
instrument_persist_rx_size(dcontext_t *dcontext, void *perscxt, size_t file_offs)
{
size_t sz = 0;
if (persist_rx_size_callbacks.num == 0)
return 0;
call_all_ret(sz, +=, , persist_rx_size_callbacks,
size_t (*)(void *, void *, size_t, void **),
(void *)dcontext, perscxt, file_offs + sz,
&persist_user_data[idx]);
/* using size_t for API w/ clients in case we want to widen in future */
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large");
return (uint) sz;
}
/* Up to caller to synchronize.
* Returns true iff all writes succeeded.
*/
bool
instrument_persist_rx(dcontext_t *dcontext, void *perscxt, file_t fd)
{
bool res = true;
ASSERT(fd != INVALID_FILE);
if (persist_rx_callbacks.num == 0)
return true;
call_all_ret(res, = res &&, , persist_rx_callbacks,
bool (*)(void *, void *, file_t, void *),
(void *)dcontext, perscxt, fd, persist_user_data[idx]);
return res;
}
/* Returns true if successfully validated and de-serialized */
bool
instrument_resurrect_rx(dcontext_t *dcontext, void *perscxt, byte *map)
{
bool res = true;
ASSERT(map != NULL);
if (resurrect_rx_callbacks.num == 0)
return true;
call_all_ret(res, = res &&, , resurrect_rx_callbacks,
bool (*)(void *, void *, byte **),
(void *)dcontext, perscxt, &map);
return res;
}
/* Up to caller to synchronize. */
uint
instrument_persist_rw_size(dcontext_t *dcontext, void *perscxt, size_t file_offs)
{
size_t sz = 0;
if (persist_rw_size_callbacks.num == 0)
return 0;
call_all_ret(sz, +=, , persist_rw_size_callbacks,
size_t (*)(void *, void *, size_t, void **),
(void *)dcontext, perscxt, file_offs + sz,
&persist_user_data[idx]);
/* using size_t for API w/ clients in case we want to widen in future */
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large");
return (uint) sz;
}
/* Up to caller to synchronize.
* Returns true iff all writes succeeded.
*/
bool
instrument_persist_rw(dcontext_t *dcontext, void *perscxt, file_t fd)
{
bool res = true;
ASSERT(fd != INVALID_FILE);
if (persist_rw_callbacks.num == 0)
return true;
call_all_ret(res, = res &&, , persist_rw_callbacks,
bool (*)(void *, void *, file_t, void *),
(void *)dcontext, perscxt, fd, persist_user_data[idx]);
return res;
}
/* Returns true if successfully validated and de-serialized */
bool
instrument_resurrect_rw(dcontext_t *dcontext, void *perscxt, byte *map)
{
bool res = true;
ASSERT(map != NULL);
if (resurrect_rw_callbacks.num == 0)
return true;
    call_all_ret(res, = res &&, , resurrect_rw_callbacks,
bool (*)(void *, void *, byte **),
(void *)dcontext, perscxt, &map);
return res;
}
bool
instrument_persist_patch(dcontext_t *dcontext, void *perscxt,
byte *bb_start, size_t bb_size)
{
bool res = true;
if (persist_patch_callbacks.num == 0)
return true;
call_all_ret(res, = res &&, , persist_patch_callbacks,
bool (*)(void *, void *, byte *, size_t, void *),
(void *)dcontext, perscxt, bb_start, bb_size,
persist_user_data[idx]);
return res;
}
DR_API
bool
dr_register_persist_ro(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
if (func_size == NULL || func_persist == NULL || func_resurrect == NULL)
return false;
add_callback(&persist_ro_size_callbacks, (void (*)(void))func_size, true);
add_callback(&persist_ro_callbacks, (void (*)(void))func_persist, true);
add_callback(&resurrect_ro_callbacks, (void (*)(void))func_resurrect, true);
return true;
}
DR_API
bool
dr_unregister_persist_ro(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
bool res = true;
if (func_size != NULL) {
res = remove_callback(&persist_ro_size_callbacks, (void (*)(void))func_size, true)
&& res;
} else
res = false;
if (func_persist != NULL) {
res = remove_callback(&persist_ro_callbacks, (void (*)(void))func_persist, true)
&& res;
} else
res = false;
if (func_resurrect != NULL) {
res = remove_callback(&resurrect_ro_callbacks, (void (*)(void))func_resurrect,
true) && res;
} else
res = false;
return res;
}
DR_API
bool
dr_register_persist_rx(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
if (func_size == NULL || func_persist == NULL || func_resurrect == NULL)
return false;
add_callback(&persist_rx_size_callbacks, (void (*)(void))func_size, true);
add_callback(&persist_rx_callbacks, (void (*)(void))func_persist, true);
add_callback(&resurrect_rx_callbacks, (void (*)(void))func_resurrect, true);
return true;
}
DR_API
bool
dr_unregister_persist_rx(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
bool res = true;
if (func_size != NULL) {
res = remove_callback(&persist_rx_size_callbacks, (void (*)(void))func_size, true)
&& res;
} else
res = false;
if (func_persist != NULL) {
res = remove_callback(&persist_rx_callbacks, (void (*)(void))func_persist, true)
&& res;
} else
res = false;
if (func_resurrect != NULL) {
res = remove_callback(&resurrect_rx_callbacks, (void (*)(void))func_resurrect,
true) && res;
} else
res = false;
return res;
}
DR_API
bool
dr_register_persist_rw(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
if (func_size == NULL || func_persist == NULL || func_resurrect == NULL)
return false;
add_callback(&persist_rw_size_callbacks, (void (*)(void))func_size, true);
add_callback(&persist_rw_callbacks, (void (*)(void))func_persist, true);
add_callback(&resurrect_rw_callbacks, (void (*)(void))func_resurrect, true);
return true;
}
DR_API
bool
dr_unregister_persist_rw(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
bool res = true;
if (func_size != NULL) {
res = remove_callback(&persist_rw_size_callbacks, (void (*)(void))func_size, true)
&& res;
} else
res = false;
if (func_persist != NULL) {
res = remove_callback(&persist_rw_callbacks, (void (*)(void))func_persist, true)
&& res;
} else
res = false;
if (func_resurrect != NULL) {
res = remove_callback(&resurrect_rw_callbacks, (void (*)(void))func_resurrect,
true) && res;
} else
res = false;
return res;
}
DR_API
bool
dr_register_persist_patch(bool (*func_patch)(void *drcontext, void *perscxt,
byte *bb_start, size_t bb_size,
void *user_data))
{
if (func_patch == NULL)
return false;
add_callback(&persist_patch_callbacks, (void (*)(void))func_patch, true);
return true;
}
DR_API
bool
dr_unregister_persist_patch(bool (*func_patch)(void *drcontext, void *perscxt,
byte *bb_start, size_t bb_size,
void *user_data))
{
return remove_callback(&persist_patch_callbacks, (void (*)(void))func_patch, true);
}
DR_API
/* Create instructions for storing pointer-size integer val to dst,
* and then insert them into ilist prior to where.
* The "first" and "last" created instructions are returned.
*/
void
instrlist_insert_mov_immed_ptrsz(void *drcontext, ptr_int_t val, opnd_t dst,
instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
CLIENT_ASSERT(opnd_get_size(dst) == OPSZ_PTR, "wrong dst size");
insert_mov_immed_ptrsz((dcontext_t *)drcontext, val, dst,
ilist, where, first, last);
}
DR_API
/* Create instructions for pushing pointer-size integer val on the stack,
* and then insert them into ilist prior to where.
* The "first" and "last" created instructions are returned.
*/
void
instrlist_insert_push_immed_ptrsz(void *drcontext, ptr_int_t val,
instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
insert_push_immed_ptrsz((dcontext_t *)drcontext, val, ilist, where,
first, last);
}
DR_API
void
instrlist_insert_mov_instr_addr(void *drcontext, instr_t *src_inst, byte *encode_pc,
opnd_t dst, instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
CLIENT_ASSERT(opnd_get_size(dst) == OPSZ_PTR, "wrong dst size");
if (encode_pc == NULL) {
/* Pass highest code cache address.
* XXX: unless we're beyond the reservation! Would still be reachable
* from rest of vmcode, but might be higher than vmcode_get_end()!
*/
encode_pc = vmcode_get_end();
}
insert_mov_instr_addr((dcontext_t *)drcontext, src_inst, encode_pc, dst,
ilist, where, first, last);
}
DR_API
void
instrlist_insert_push_instr_addr(void *drcontext, instr_t *src_inst, byte *encode_pc,
instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
if (encode_pc == NULL) {
/* Pass highest code cache address.
* XXX: unless we're beyond the reservation! Would still be reachable
* from rest of vmcode, but might be higher than vmcode_get_end()!
*/
encode_pc = vmcode_get_end();
}
insert_push_instr_addr((dcontext_t *)drcontext, src_inst, encode_pc,
ilist, where, first, last);
}
#endif /* CLIENT_INTERFACE */
| 1 | 13,120 | style violation: { on own line | DynamoRIO-dynamorio | c |
@@ -421,6 +421,10 @@ static h2o_http1client_body_cb on_head(h2o_http1client_t *client, const char *er
goto AddHeaderDuped;
} else if (token == H2O_TOKEN_LINK) {
h2o_push_path_in_link_header(req, headers[i].value, headers[i].value_len);
+ } else if (token == H2O_TOKEN_X_COMPRESS) {
+ req->compression_hint =
+ h2o_strtosize(headers[i].value, headers[i].value_len) ? H2O_COMPRESS_HINT_ENABLE : H2O_COMPRESS_HINT_DISABLE;
+ goto Skip;
}
/* default behaviour, transfer the header downstream */
AddHeaderDuped: | 1 | /*
* Copyright (c) 2014,2015 DeNA Co., Ltd., Kazuho Oku, Masahiro Nagano
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include "picohttpparser.h"
#include "h2o.h"
#include "h2o/http1.h"
#include "h2o/http1client.h"
#include "h2o/tunnel.h"
struct rp_generator_t {
h2o_generator_t super;
h2o_req_t *src_req;
h2o_http1client_t *client;
struct {
h2o_iovec_t bufs[2]; /* first buf is the request line and headers, the second is the POST content */
int is_head;
} up_req;
h2o_buffer_t *last_content_before_send;
h2o_doublebuffer_t sending;
int is_websocket_handshake;
int had_body_error; /* set if an error happened while fetching the body so that we can propagate the error */
};
struct rp_ws_upgrade_info_t {
h2o_context_t *ctx;
h2o_timeout_t *timeout;
h2o_socket_t *upstream_sock;
};
static h2o_http1client_ctx_t *get_client_ctx(h2o_req_t *req)
{
h2o_req_overrides_t *overrides = req->overrides;
if (overrides != NULL && overrides->client_ctx != NULL)
return overrides->client_ctx;
return &req->conn->ctx->proxy.client_ctx;
}
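/* Rewrites a Location header value that points at the upstream (`match`) so that it refers to the
 * downstream scheme/authority/basepath; returns an all-NULL iovec when the location does not match
 * the upstream and should be passed through untouched.
 */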
static h2o_iovec_t rewrite_location(h2o_mem_pool_t *pool, const char *location, size_t location_len, h2o_url_t *match,
const h2o_url_scheme_t *req_scheme, h2o_iovec_t req_authority, h2o_iovec_t req_basepath)
{
h2o_url_t loc_parsed;
if (h2o_url_parse(location, location_len, &loc_parsed) != 0)
goto NoRewrite;
if (loc_parsed.scheme != &H2O_URL_SCHEME_HTTP)
goto NoRewrite;
if (!h2o_lcstris(loc_parsed.host.base, loc_parsed.host.len, match->host.base, match->host.len))
goto NoRewrite;
if (h2o_url_get_port(&loc_parsed) != h2o_url_get_port(match))
goto NoRewrite;
if (loc_parsed.path.len < match->path.len)
goto NoRewrite;
if (memcmp(loc_parsed.path.base, match->path.base, match->path.len) != 0)
goto NoRewrite;
return h2o_concat(pool, req_scheme->name, h2o_iovec_init(H2O_STRLIT("://")), req_authority, req_basepath,
h2o_iovec_init(loc_parsed.path.base + match->path.len, loc_parsed.path.len - match->path.len));
NoRewrite:
return (h2o_iovec_t){NULL};
}
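/* Joins `added` onto `merged` with the separator character plus a space in between, allocating the
 * combined value from pool; if either side is empty the other is returned unchanged.
 */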
static h2o_iovec_t build_request_merge_headers(h2o_mem_pool_t *pool, h2o_iovec_t merged, h2o_iovec_t added, int seperator)
{
if (added.len == 0)
return merged;
if (merged.len == 0)
return added;
size_t newlen = merged.len + 2 + added.len;
char *buf = h2o_mem_alloc_pool(pool, newlen);
memcpy(buf, merged.base, merged.len);
buf[merged.len] = seperator;
buf[merged.len + 1] = ' ';
memcpy(buf + merged.len + 2, added.base, added.len);
merged.base = buf;
merged.len = newlen;
return merged;
}
/*
 * A request with neither a Content-Length nor a Transfer-Encoding header implies a zero-length request body (see the 6th rule of RFC 7230
* 3.3.3).
* OTOH, section 3.3.3 states:
*
* A user agent SHOULD send a Content-Length in a request message when
* no Transfer-Encoding is sent and the request method defines a meaning
* for an enclosed payload body. For example, a Content-Length header
* field is normally sent in a POST request even when the value is 0
* (indicating an empty payload body). A user agent SHOULD NOT send a
* Content-Length header field when the request message does not contain
* a payload body and the method semantics do not anticipate such a
* body.
*
* PUT and POST define a meaning for the payload body, let's emit a
* Content-Length header if it doesn't exist already, since the server
* might send a '411 Length Required' response.
*
* see also: ML thread starting at https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0580.html
*/
static int req_requires_content_length(h2o_req_t *req)
{
int is_put_or_post =
(req->method.len >= 1 && req->method.base[0] == 'P' && (h2o_memis(req->method.base, req->method.len, H2O_STRLIT("POST")) ||
h2o_memis(req->method.base, req->method.len, H2O_STRLIT("PUT"))));
return is_put_or_post && h2o_find_header(&req->res.headers, H2O_TOKEN_TRANSFER_ENCODING, -1) == -1;
}
static h2o_iovec_t build_request(h2o_req_t *req, int keepalive, int is_websocket_handshake, int use_proxy_protocol)
{
h2o_iovec_t buf;
size_t offset = 0, remote_addr_len = SIZE_MAX;
char remote_addr[NI_MAXHOST];
struct sockaddr_storage ss;
socklen_t sslen;
h2o_iovec_t cookie_buf = {NULL}, xff_buf = {NULL}, via_buf = {NULL};
int preserve_x_forwarded_proto = req->conn->ctx->globalconf->proxy.preserve_x_forwarded_proto;
int emit_x_forwarded_headers = req->conn->ctx->globalconf->proxy.emit_x_forwarded_headers;
/* for x-f-f */
if ((sslen = req->conn->callbacks->get_peername(req->conn, (void *)&ss)) != 0)
remote_addr_len = h2o_socket_getnumerichost((void *)&ss, sslen, remote_addr);
/* build response */
buf.len = req->method.len + req->path.len + req->authority.len + 512;
if (use_proxy_protocol)
buf.len += H2O_PROXY_HEADER_MAX_LENGTH;
buf.base = h2o_mem_alloc_pool(&req->pool, buf.len);
#define RESERVE(sz) \
do { \
size_t required = offset + sz + 4 /* for "\r\n\r\n" */; \
if (required > buf.len) { \
do { \
buf.len *= 2; \
} while (required > buf.len); \
char *newp = h2o_mem_alloc_pool(&req->pool, buf.len); \
memcpy(newp, buf.base, offset); \
buf.base = newp; \
} \
} while (0)
#define APPEND(s, l) \
do { \
memcpy(buf.base + offset, (s), (l)); \
offset += (l); \
} while (0)
#define APPEND_STRLIT(lit) APPEND((lit), sizeof(lit) - 1)
#define FLATTEN_PREFIXED_VALUE(prefix, value, add_size) \
do { \
RESERVE(sizeof(prefix) - 1 + value.len + 2 + add_size); \
APPEND_STRLIT(prefix); \
if (value.len != 0) { \
APPEND(value.base, value.len); \
if (add_size != 0) { \
buf.base[offset++] = ','; \
buf.base[offset++] = ' '; \
} \
} \
} while (0)
if (use_proxy_protocol)
offset += h2o_stringify_proxy_header(req->conn, buf.base + offset);
APPEND(req->method.base, req->method.len);
buf.base[offset++] = ' ';
APPEND(req->path.base, req->path.len);
APPEND_STRLIT(" HTTP/1.1\r\nconnection: ");
if (is_websocket_handshake) {
APPEND_STRLIT("upgrade\r\nupgrade: websocket\r\nhost: ");
} else if (keepalive) {
APPEND_STRLIT("keep-alive\r\nhost: ");
} else {
APPEND_STRLIT("close\r\nhost: ");
}
APPEND(req->authority.base, req->authority.len);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
assert(offset <= buf.len);
if (req->entity.base != NULL || req_requires_content_length(req)) {
RESERVE(sizeof("content-length: " H2O_UINT64_LONGEST_STR) - 1);
offset += sprintf(buf.base + offset, "content-length: %zu\r\n", req->entity.len);
}
{
const h2o_header_t *h, *h_end;
for (h = req->headers.entries, h_end = h + req->headers.size; h != h_end; ++h) {
if (h2o_iovec_is_token(h->name)) {
const h2o_token_t *token = (void *)h->name;
if (token->proxy_should_drop) {
continue;
} else if (token == H2O_TOKEN_COOKIE) {
/* merge the cookie headers; see HTTP/2 8.1.2.5 and HTTP/1 (RFC6265 5.4) */
/* FIXME current algorithm is O(n^2) against the number of cookie headers */
cookie_buf = build_request_merge_headers(&req->pool, cookie_buf, h->value, ';');
continue;
} else if (token == H2O_TOKEN_VIA) {
via_buf = build_request_merge_headers(&req->pool, via_buf, h->value, ',');
continue;
} else if (token == H2O_TOKEN_X_FORWARDED_FOR) {
if (!emit_x_forwarded_headers) {
goto AddHeader;
}
xff_buf = build_request_merge_headers(&req->pool, xff_buf, h->value, ',');
continue;
}
}
if (!preserve_x_forwarded_proto && h2o_lcstris(h->name->base, h->name->len, H2O_STRLIT("x-forwarded-proto")))
continue;
AddHeader:
RESERVE(h->name->len + h->value.len + 2);
APPEND(h->name->base, h->name->len);
buf.base[offset++] = ':';
buf.base[offset++] = ' ';
APPEND(h->value.base, h->value.len);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
}
if (cookie_buf.len != 0) {
FLATTEN_PREFIXED_VALUE("cookie: ", cookie_buf, 0);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
if (emit_x_forwarded_headers) {
if (!preserve_x_forwarded_proto) {
FLATTEN_PREFIXED_VALUE("x-forwarded-proto: ", req->input.scheme->name, 0);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
if (remote_addr_len != SIZE_MAX) {
FLATTEN_PREFIXED_VALUE("x-forwarded-for: ", xff_buf, remote_addr_len);
APPEND(remote_addr, remote_addr_len);
} else {
FLATTEN_PREFIXED_VALUE("x-forwarded-for: ", xff_buf, 0);
}
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
FLATTEN_PREFIXED_VALUE("via: ", via_buf, sizeof("1.1 ") - 1 + req->input.authority.len);
if (req->version < 0x200) {
buf.base[offset++] = '1';
buf.base[offset++] = '.';
buf.base[offset++] = '0' + (0x100 <= req->version && req->version <= 0x109 ? req->version - 0x100 : 0);
} else {
buf.base[offset++] = '2';
}
buf.base[offset++] = ' ';
APPEND(req->input.authority.base, req->input.authority.len);
APPEND_STRLIT("\r\n\r\n");
#undef RESERVE
#undef APPEND
#undef APPEND_STRLIT
#undef FLATTEN_PREFIXED_VALUE
/* set the length */
assert(offset <= buf.len);
buf.len = offset;
return buf;
}
static void do_close(h2o_generator_t *generator, h2o_req_t *req)
{
struct rp_generator_t *self = (void *)generator;
if (self->client != NULL) {
h2o_http1client_cancel(self->client);
self->client = NULL;
}
}
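/* Drains whatever the upstream socket (or the saved last content) has buffered through the double
 * buffer and sends it downstream, using the FINAL state once upstream is done, ERROR after a body
 * error, and IN_PROGRESS otherwise.
 */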
static void do_send(struct rp_generator_t *self)
{
h2o_iovec_t vecs[1];
size_t veccnt;
h2o_send_state_t ststate;
assert(self->sending.bytes_inflight == 0);
vecs[0] = h2o_doublebuffer_prepare(&self->sending,
self->client != NULL ? &self->client->sock->input : &self->last_content_before_send,
self->src_req->preferred_chunk_size);
if (self->client == NULL && vecs[0].len == self->sending.buf->size && self->last_content_before_send->size == 0) {
veccnt = vecs[0].len != 0 ? 1 : 0;
ststate = H2O_SEND_STATE_FINAL;
} else {
if (vecs[0].len == 0)
return;
veccnt = 1;
ststate = H2O_SEND_STATE_IN_PROGRESS;
}
if (self->had_body_error)
ststate = H2O_SEND_STATE_ERROR;
h2o_send(self->src_req, vecs, veccnt, ststate);
}
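/* Generator "proceed" callback: marks the in-flight bytes as consumed and tries to send more. */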
static void do_proceed(h2o_generator_t *generator, h2o_req_t *req)
{
struct rp_generator_t *self = (void *)generator;
h2o_doublebuffer_consume(&self->sending);
do_send(self);
}
static void on_websocket_upgrade_complete(void *_info, h2o_socket_t *sock, size_t reqsize)
{
struct rp_ws_upgrade_info_t *info = _info;
if (sock != NULL) {
h2o_tunnel_establish(info->ctx, sock, info->upstream_sock, info->timeout);
} else {
h2o_socket_close(info->upstream_sock);
}
free(info);
}
static inline void on_websocket_upgrade(struct rp_generator_t *self, h2o_timeout_t *timeout)
{
h2o_req_t *req = self->src_req;
h2o_socket_t *sock = h2o_http1client_steal_socket(self->client);
struct rp_ws_upgrade_info_t *info = h2o_mem_alloc(sizeof(*info));
info->upstream_sock = sock;
info->timeout = timeout;
info->ctx = req->conn->ctx;
h2o_http1_upgrade(req, NULL, 0, on_websocket_upgrade_complete, info);
}
static int on_body(h2o_http1client_t *client, const char *errstr)
{
struct rp_generator_t *self = client->data;
if (errstr != NULL) {
/* detach the content */
self->last_content_before_send = self->client->sock->input;
h2o_buffer_init(&self->client->sock->input, &h2o_socket_buffer_prototype);
self->client = NULL;
if (errstr != h2o_http1client_error_is_eos) {
h2o_req_log_error(self->src_req, "lib/core/proxy.c", "%s", errstr);
self->had_body_error = 1;
}
}
if (self->sending.bytes_inflight == 0)
do_send(self);
return 0;
}
static h2o_http1client_body_cb on_head(h2o_http1client_t *client, const char *errstr, int minor_version, int status,
h2o_iovec_t msg, h2o_http1client_header_t *headers, size_t num_headers)
{
struct rp_generator_t *self = client->data;
h2o_req_t *req = self->src_req;
size_t i;
if (errstr != NULL && errstr != h2o_http1client_error_is_eos) {
self->client = NULL;
h2o_req_log_error(req, "lib/core/proxy.c", "%s", errstr);
h2o_send_error_502(req, "Gateway Error", errstr, 0);
return NULL;
}
/* copy the response (note: all the headers must be copied; http1client discards the input once we return from this callback) */
req->res.status = status;
req->res.reason = h2o_strdup(&req->pool, msg.base, msg.len).base;
for (i = 0; i != num_headers; ++i) {
const h2o_token_t *token = h2o_lookup_token(headers[i].name, headers[i].name_len);
h2o_iovec_t value;
if (token != NULL) {
if (token->proxy_should_drop) {
goto Skip;
}
if (token == H2O_TOKEN_CONTENT_LENGTH) {
if (req->res.content_length != SIZE_MAX ||
(req->res.content_length = h2o_strtosize(headers[i].value, headers[i].value_len)) == SIZE_MAX) {
self->client = NULL;
h2o_req_log_error(req, "lib/core/proxy.c", "%s", "invalid response from upstream (malformed content-length)");
h2o_send_error_502(req, "Gateway Error", "invalid response from upstream", 0);
return NULL;
}
goto Skip;
} else if (token == H2O_TOKEN_LOCATION) {
if (req->res_is_delegated && (300 <= status && status <= 399) && status != 304) {
self->client = NULL;
h2o_iovec_t method = h2o_get_redirect_method(req->method, status);
h2o_send_redirect_internal(req, method, headers[i].value, headers[i].value_len, 1);
return NULL;
}
if (req->overrides != NULL && req->overrides->location_rewrite.match != NULL) {
value =
rewrite_location(&req->pool, headers[i].value, headers[i].value_len, req->overrides->location_rewrite.match,
req->input.scheme, req->input.authority, req->overrides->location_rewrite.path_prefix);
if (value.base != NULL)
goto AddHeader;
}
goto AddHeaderDuped;
} else if (token == H2O_TOKEN_LINK) {
h2o_push_path_in_link_header(req, headers[i].value, headers[i].value_len);
}
/* default behaviour, transfer the header downstream */
AddHeaderDuped:
value = h2o_strdup(&req->pool, headers[i].value, headers[i].value_len);
AddHeader:
h2o_add_header(&req->pool, &req->res.headers, token, value.base, value.len);
Skip:;
} else {
h2o_iovec_t name = h2o_strdup(&req->pool, headers[i].name, headers[i].name_len);
h2o_iovec_t value = h2o_strdup(&req->pool, headers[i].value, headers[i].value_len);
h2o_add_header_by_str(&req->pool, &req->res.headers, name.base, name.len, 0, value.base, value.len);
}
}
if (self->is_websocket_handshake && req->res.status == 101) {
h2o_http1client_ctx_t *client_ctx = get_client_ctx(req);
assert(client_ctx->websocket_timeout != NULL);
h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_UPGRADE, H2O_STRLIT("websocket"));
on_websocket_upgrade(self, client_ctx->websocket_timeout);
self->client = NULL;
return NULL;
}
/* declare the start of the response */
h2o_start_response(req, &self->super);
if (errstr == h2o_http1client_error_is_eos) {
self->client = NULL;
h2o_send(req, NULL, 0, H2O_SEND_STATE_FINAL);
return NULL;
}
return on_body;
}
static int on_1xx(h2o_http1client_t *client, int minor_version, int status, h2o_iovec_t msg, h2o_http1client_header_t *headers,
size_t num_headers)
{
struct rp_generator_t *self = client->data;
size_t i;
for (i = 0; i != num_headers; ++i) {
if (h2o_memis(headers[i].name, headers[i].name_len, H2O_STRLIT("link")))
h2o_push_path_in_link_header(self->src_req, headers[i].value, headers[i].value_len);
}
return 0;
}
static h2o_http1client_head_cb on_connect(h2o_http1client_t *client, const char *errstr, h2o_iovec_t **reqbufs, size_t *reqbufcnt,
int *method_is_head)
{
struct rp_generator_t *self = client->data;
if (errstr != NULL) {
self->client = NULL;
h2o_req_log_error(self->src_req, "lib/core/proxy.c", "%s", errstr);
h2o_send_error_502(self->src_req, "Gateway Error", errstr, 0);
return NULL;
}
*reqbufs = self->up_req.bufs;
*reqbufcnt = self->up_req.bufs[1].base != NULL ? 2 : 1;
*method_is_head = self->up_req.is_head;
self->client->informational_cb = on_1xx;
return on_head;
}
static void on_generator_dispose(void *_self)
{
struct rp_generator_t *self = _self;
if (self->client != NULL) {
h2o_http1client_cancel(self->client);
self->client = NULL;
}
h2o_buffer_dispose(&self->last_content_before_send);
h2o_doublebuffer_dispose(&self->sending);
}
static struct rp_generator_t *proxy_send_prepare(h2o_req_t *req, int keepalive, int use_proxy_protocol)
{
struct rp_generator_t *self = h2o_mem_alloc_shared(&req->pool, sizeof(*self), on_generator_dispose);
h2o_http1client_ctx_t *client_ctx = get_client_ctx(req);
self->super.proceed = do_proceed;
self->super.stop = do_close;
self->src_req = req;
if (client_ctx->websocket_timeout != NULL && h2o_lcstris(req->upgrade.base, req->upgrade.len, H2O_STRLIT("websocket"))) {
self->is_websocket_handshake = 1;
} else {
self->is_websocket_handshake = 0;
}
self->had_body_error = 0;
self->up_req.bufs[0] = build_request(req, keepalive, self->is_websocket_handshake, use_proxy_protocol);
self->up_req.bufs[1] = req->entity;
self->up_req.is_head = h2o_memis(req->method.base, req->method.len, H2O_STRLIT("HEAD"));
h2o_buffer_init(&self->last_content_before_send, &h2o_socket_buffer_prototype);
h2o_doublebuffer_init(&self->sending, &h2o_socket_buffer_prototype);
return self;
}
void h2o__proxy_process_request(h2o_req_t *req)
{
h2o_req_overrides_t *overrides = req->overrides;
h2o_http1client_ctx_t *client_ctx = get_client_ctx(req);
struct rp_generator_t *self;
if (overrides != NULL) {
if (overrides->socketpool != NULL) {
if (overrides->use_proxy_protocol)
assert(!"proxy protocol cannot be used for a persistent upstream connection");
self = proxy_send_prepare(req, 1, 0);
h2o_http1client_connect_with_pool(&self->client, self, client_ctx, overrides->socketpool, on_connect);
return;
} else if (overrides->hostport.host.base != NULL) {
self = proxy_send_prepare(req, 0, overrides->use_proxy_protocol);
h2o_http1client_connect(&self->client, self, client_ctx, req->overrides->hostport.host, req->overrides->hostport.port,
0, on_connect);
return;
}
}
{ /* default logic */
h2o_iovec_t host;
uint16_t port;
if (h2o_url_parse_hostport(req->authority.base, req->authority.len, &host, &port) == NULL) {
h2o_req_log_error(req, "lib/core/proxy.c", "invalid URL supplied for internal redirection:%s://%.*s%.*s",
req->scheme->name.base, (int)req->authority.len, req->authority.base, (int)req->path.len,
req->path.base);
h2o_send_error_502(req, "Gateway Error", "internal error", 0);
return;
}
if (port == 65535)
port = req->scheme->default_port;
self = proxy_send_prepare(req, 0, overrides != NULL && overrides->use_proxy_protocol);
h2o_http1client_connect(&self->client, self, client_ctx, host, port, req->scheme == &H2O_URL_SCHEME_HTTPS, on_connect);
return;
}
}
| 1 | 11,673 | Could we accept caseless strings (e.g. `off`, `on`, `auto`) instead of numbers to indicate the mode? Of course, we should use an enum internally (as we already do). | h2o-h2o | c |
@@ -28,7 +28,8 @@ const Topic = "/fil/msgs"
// Abstracts over a store of blockchain state.
type chainState interface {
GetHead() types.SortedCidSet
- GetTipSetAndState(tsKey types.SortedCidSet) (*chain.TipSetAndState, error)
+ GetTipSet(tsKey types.SortedCidSet) (*types.TipSet, error)
+ GetTipSetStateRoot(tsKey types.SortedCidSet) (cid.Cid, error)
}
// BlockClock defines a interface to a struct that can give the current block height. | 1 | package msg
import (
"context"
"sync"
"github.com/ipfs/go-cid"
hamt "github.com/ipfs/go-hamt-ipld"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/abi"
"github.com/filecoin-project/go-filecoin/actor"
"github.com/filecoin-project/go-filecoin/actor/builtin"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/chain"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/core"
"github.com/filecoin-project/go-filecoin/metrics"
"github.com/filecoin-project/go-filecoin/state"
"github.com/filecoin-project/go-filecoin/types"
)
var msgSendErrCt = metrics.NewInt64Counter("message_sender_error", "Number of errors encountered while sending a message")
// Topic is the network pubsub topic identifier on which new messages are announced.
const Topic = "/fil/msgs"
// Abstracts over a store of blockchain state.
type chainState interface {
GetHead() types.SortedCidSet
GetTipSetAndState(tsKey types.SortedCidSet) (*chain.TipSetAndState, error)
}
// BlockClock defines an interface to a struct that can give the current block height.
type BlockClock interface {
BlockHeight() (uint64, error)
}
// PublishFunc is a function the Sender calls to publish a message to the network.
type PublishFunc func(topic string, data []byte) error
// Sender is plumbing implementation that knows how to send a message.
type Sender struct {
// Signs messages.
signer types.Signer
// Provides actor state
chainState chainState
// To load the tree for the head tipset state root.
cst *hamt.CborIpldStore
// Provides the current block height
blockTimer BlockClock
// Tracks inbound messages for mining
inbox *core.MessagePool
// Tracks outbound messages
outbox *core.MessageQueue
// Validates messages before sending them.
validator consensus.SignedMessageValidator
// Invoked to publish the new message to the network.
publish PublishFunc
// Protects the "next nonce" calculation to avoid collisions.
l sync.Mutex
}
// NewSender returns a new Sender. There should be exactly one of these per node because
// sending locks to reduce nonce collisions.
func NewSender(signer types.Signer, chainReader chain.ReadStore, cst *hamt.CborIpldStore, blockTimer BlockClock,
msgQueue *core.MessageQueue, msgPool *core.MessagePool,
validator consensus.SignedMessageValidator, publish PublishFunc) *Sender {
return &Sender{
signer: signer,
chainState: chainReader,
cst: cst,
blockTimer: blockTimer,
inbox: msgPool,
outbox: msgQueue,
validator: validator,
publish: publish,
}
}
// Send sends a message. See api description.
func (s *Sender) Send(ctx context.Context, from, to address.Address, value *types.AttoFIL, gasPrice types.AttoFIL, gasLimit types.GasUnits, method string, params ...interface{}) (out cid.Cid, err error) {
defer func() {
if err != nil {
msgSendErrCt.Inc(ctx, 1)
}
}()
encodedParams, err := abi.ToEncodedValues(params...)
if err != nil {
return cid.Undef, errors.Wrap(err, "invalid params")
}
// Lock to avoid race for message nonce.
s.l.Lock()
defer s.l.Unlock()
headTs := s.chainState.GetHead()
tsas, err := s.chainState.GetTipSetAndState(headTs)
if err != nil {
return cid.Undef, errors.Wrap(err, "couldnt get latest state root")
}
st, err := state.LoadStateTree(ctx, s.cst, tsas.TipSetStateRoot, builtin.Actors)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to load state from chain")
}
fromActor, err := st.GetActor(ctx, from)
if err != nil {
return cid.Undef, errors.Wrapf(err, "no actor at address %s", from)
}
nonce, err := nextNonce(fromActor, s.outbox, from)
if err != nil {
return cid.Undef, errors.Wrapf(err, "failed calculating nonce for actor %s", from)
}
msg := types.NewMessage(from, to, nonce, value, method, encodedParams)
smsg, err := types.NewSignedMessage(*msg, s.signer, gasPrice, gasLimit)
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to sign message")
}
err = s.validator.Validate(ctx, smsg, fromActor)
if err != nil {
return cid.Undef, errors.Wrap(err, "invalid message")
}
smsgdata, err := smsg.Marshal()
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to marshal message")
}
height, err := s.blockTimer.BlockHeight()
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to get block height")
}
// Add to the local message queue/pool at the last possible moment before broadcasting to network.
if err := s.outbox.Enqueue(smsg, height); err != nil {
return cid.Undef, errors.Wrap(err, "failed to add message to outbound queue")
}
if _, err := s.inbox.Add(ctx, smsg); err != nil {
return cid.Undef, errors.Wrap(err, "failed to add message to message pool")
}
if err = s.publish(Topic, smsgdata); err != nil {
return cid.Undef, errors.Wrap(err, "failed to publish message to network")
}
log.Debugf("MessageSend with message: %s", smsg)
return smsg.Cid()
}
// nextNonce returns the next expected nonce value for an account actor. This is the larger
// of the actor's nonce value and one greater than the largest nonce for the actor found in the outbound message queue.
func nextNonce(act *actor.Actor, outbox *core.MessageQueue, address address.Address) (uint64, error) {
actorNonce, err := actor.NextNonce(act)
if err != nil {
return 0, err
}
poolNonce, found := outbox.LargestNonce(address)
if found && poolNonce >= actorNonce {
return poolNonce + 1, nil
}
return actorNonce, nil
}
| 1 | 19,134 | The sender doesn't use `GetTipSet`, so please remove it from this interface. | filecoin-project-venus | go |
@@ -32,10 +32,10 @@ import org.openqa.selenium.remote.tracing.HttpTracing;
import org.openqa.selenium.remote.tracing.Tracer;
import java.net.URL;
+import java.util.Objects;
import java.util.UUID;
import java.util.logging.Logger;
-import static org.openqa.selenium.net.Urls.fromUri;
import static org.openqa.selenium.remote.http.Contents.asJson;
import static org.openqa.selenium.remote.http.HttpMethod.DELETE;
import static org.openqa.selenium.remote.http.HttpMethod.GET; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.distributor.remote;
import org.openqa.selenium.SessionNotCreatedException;
import org.openqa.selenium.grid.data.CreateSessionResponse;
import org.openqa.selenium.grid.data.DistributorStatus;
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.grid.node.Node;
import org.openqa.selenium.grid.web.Values;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpHandler;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.remote.tracing.HttpTracing;
import org.openqa.selenium.remote.tracing.Tracer;
import java.net.URL;
import java.util.UUID;
import java.util.logging.Logger;
import static org.openqa.selenium.net.Urls.fromUri;
import static org.openqa.selenium.remote.http.Contents.asJson;
import static org.openqa.selenium.remote.http.HttpMethod.DELETE;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
import static org.openqa.selenium.remote.http.HttpMethod.POST;
public class RemoteDistributor extends Distributor {
private static final Logger LOG = Logger.getLogger("Selenium Distributor (Remote)");
private final HttpHandler client;
public RemoteDistributor(Tracer tracer, HttpClient.Factory factory, URL url) {
super(tracer, factory);
this.client = factory.createClient(url);
}
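  // Probes the remote distributor's /readyz endpoint; any exception is treated as "not ready".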
@Override
public boolean isReady() {
try {
return client.execute(new HttpRequest(GET, "/readyz")).isSuccessful();
} catch (Exception e) {
return false;
}
}
@Override
public CreateSessionResponse newSession(HttpRequest request)
throws SessionNotCreatedException {
HttpRequest upstream = new HttpRequest(POST, "/se/grid/distributor/session");
HttpTracing.inject(tracer, tracer.getCurrentContext(), upstream);
upstream.setContent(request.getContent());
HttpResponse response = client.execute(upstream);
return Values.get(response, CreateSessionResponse.class);
}
@Override
public RemoteDistributor add(Node node) {
HttpRequest request = new HttpRequest(POST, "/se/grid/distributor/node");
HttpTracing.inject(tracer, tracer.getCurrentContext(), request);
request.setContent(asJson(node.getStatus()));
HttpResponse response = client.execute(request);
Values.get(response, Void.class);
LOG.info(String.format("Added node %s.", node.getId()));
return this;
}
@Override
public void remove(UUID nodeId) {
Require.nonNull("Node ID", nodeId);
HttpRequest request = new HttpRequest(DELETE, "/se/grid/distributor/node/" + nodeId);
HttpTracing.inject(tracer, tracer.getCurrentContext(), request);
HttpResponse response = client.execute(request);
Values.get(response, Void.class);
}
@Override
public DistributorStatus getStatus() {
HttpRequest request = new HttpRequest(GET, "/se/grid/distributor/status");
HttpTracing.inject(tracer, tracer.getCurrentContext(), request);
HttpResponse response = client.execute(request);
return Values.get(response, DistributorStatus.class);
}
}
| 1 | 17,752 | We can get rid of this import then. | SeleniumHQ-selenium | rb |
@@ -92,9 +92,9 @@ public class KubernetesContainerizedImpl implements ContainerizedImpl {
public static final String DEFAULT_NSCD_SOCKET_HOST_PATH = "/var/run/nscd/socket";
public static final String HOST_PATH_TYPE = "Socket";
public static final String DEFAULT_NSCD_SOCKET_VOLUME_MOUNT_PATH = "/var/run/nscd/socket";
- public static final String DEFAULT_SECRET_NAME = "azkaban-private-properties";
+ public static final String DEFAULT_SECRET_NAME = "azkaban-k8s-secret";
public static final String DEFAULT_SECRET_VOLUME = DEFAULT_SECRET_NAME;
- public static final String DEFAULT_SECRET_MOUNTPATH = "/var/azkaban/private/conf";
+ public static final String DEFAULT_SECRET_MOUNTPATH = "/var/azkaban/private";
private final String namespace; | 1 | /*
* Copyright 2020 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor.container;
import static azkaban.Constants.ImageMgmtConstants.AZKABAN_CONFIG;
import static azkaban.Constants.ImageMgmtConstants.AZKABAN_BASE_IMAGE;
import azkaban.Constants;
import azkaban.Constants.ConfigurationKeys;
import azkaban.Constants.ContainerizedDispatchManagerProperties;
import azkaban.container.models.AzKubernetesV1PodBuilder;
import azkaban.container.models.AzKubernetesV1ServiceBuilder;
import azkaban.container.models.AzKubernetesV1SpecBuilder;
import azkaban.container.models.ImagePullPolicy;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutableFlowBase;
import azkaban.executor.ExecutableNode;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.imagemgmt.rampup.ImageRampupManager;
import azkaban.imagemgmt.version.VersionInfo;
import azkaban.imagemgmt.version.VersionSet;
import azkaban.imagemgmt.version.VersionSetBuilder;
import azkaban.imagemgmt.version.VersionSetLoader;
import azkaban.utils.Props;
import com.google.common.collect.ImmutableMap;
import com.google.common.annotations.VisibleForTesting;
import io.kubernetes.client.openapi.ApiClient;
import io.kubernetes.client.openapi.ApiException;
import io.kubernetes.client.openapi.apis.CoreV1Api;
import io.kubernetes.client.openapi.models.V1DeleteOptions;
import io.kubernetes.client.openapi.models.V1Pod;
import io.kubernetes.client.openapi.models.V1PodSpec;
import io.kubernetes.client.openapi.models.V1Service;
import io.kubernetes.client.openapi.models.V1Status;
import io.kubernetes.client.util.ClientBuilder;
import io.kubernetes.client.util.KubeConfig;
import io.kubernetes.client.util.Yaml;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class is the Kubernetes based implementation for containerization. It implements
 * creation/deletion of the pod and the service. For any execution, it will identify the version
 * set and create a pod covering all the valid jobTypes of the flow.
*/
@Singleton
public class KubernetesContainerizedImpl implements ContainerizedImpl {
public static final String DEFAULT_FLOW_CONTAINER_NAME_PREFIX = "az-flow-container";
public static final String DEFAULT_POD_NAME_PREFIX = "fc-dep";
public static final String DEFAULT_SERVICE_NAME_PREFIX = "fc-svc";
public static final String DEFAULT_CLUSTER_NAME = "azkaban";
public static final String CPU_LIMIT = "4";
public static final String DEFAULT_CPU_REQUEST = "1";
public static final String MEMORY_LIMIT = "64Gi";
public static final String DEFAULT_MEMORY_REQUEST = "2Gi";
public static final String MAPPING = "Mapping";
public static final String SERVICE_API_VERSION_2 = "ambassador/v2";
public static final String DEFAULT_INIT_MOUNT_PATH_PREFIX_FOR_JOBTYPES = "/data/jobtypes";
public static final String DEFAULT_APP_MOUNT_PATH_PREFIX_FOR_JOBTYPES =
"/export/apps/azkaban/azkaban-exec-server/current/plugins/jobtypes";
public static final String IMAGE = "image";
public static final String VERSION = "version";
public static final String NSCD_SOCKET_VOLUME_NAME = "nscd-socket";
public static final String DEFAULT_NSCD_SOCKET_HOST_PATH = "/var/run/nscd/socket";
public static final String HOST_PATH_TYPE = "Socket";
public static final String DEFAULT_NSCD_SOCKET_VOLUME_MOUNT_PATH = "/var/run/nscd/socket";
public static final String DEFAULT_SECRET_NAME = "azkaban-private-properties";
public static final String DEFAULT_SECRET_VOLUME = DEFAULT_SECRET_NAME;
public static final String DEFAULT_SECRET_MOUNTPATH = "/var/azkaban/private/conf";
private final String namespace;
private final ApiClient client;
private final CoreV1Api coreV1Api;
private final Props azkProps;
private final ExecutorLoader executorLoader;
private final String podPrefix;
private final String servicePrefix;
private final String clusterName;
private final String flowContainerName;
private final String cpuLimit;
private final String cpuRequest;
private final String memoryLimit;
private final String memoryRequest;
private final int servicePort;
private final long serviceTimeout;
private final String nscdSocketHostPath;
private final String nscdSocketVolumeMountPath;
private final VersionSetLoader versionSetLoader;
private final ImageRampupManager imageRampupManager;
private final String initMountPathPrefixForJobtypes;
private final String appMountPathPrefixForJobtypes;
private static final Set<String> INCLUDED_JOB_TYPES = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
private final String secretName;
private final String secretVolume;
private final String secretMountpath;
private static final Logger logger = LoggerFactory
.getLogger(KubernetesContainerizedImpl.class);
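  /**
   * Reads the Kubernetes related configuration (namespace, pod/service naming, resource
   * requests/limits, mount paths and secret settings) from the Azkaban properties and builds the
   * Kubernetes API client from the configured kube config file.
   */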
@Inject
public KubernetesContainerizedImpl(final Props azkProps,
final ExecutorLoader executorLoader,
final VersionSetLoader versionSetLoader,
final ImageRampupManager imageRampupManager)
throws ExecutorManagerException {
this.azkProps = azkProps;
this.executorLoader = executorLoader;
this.versionSetLoader = versionSetLoader;
this.imageRampupManager = imageRampupManager;
this.namespace = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_NAMESPACE);
this.flowContainerName =
this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_NAME
, DEFAULT_FLOW_CONTAINER_NAME_PREFIX);
this.podPrefix =
this.azkProps.getString(ContainerizedDispatchManagerProperties.KUBERNETES_POD_NAME_PREFIX,
DEFAULT_POD_NAME_PREFIX);
this.servicePrefix = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_NAME_PREFIX,
DEFAULT_SERVICE_NAME_PREFIX);
this.clusterName = this.azkProps.getString(ConfigurationKeys.AZKABAN_CLUSTER_NAME,
DEFAULT_CLUSTER_NAME);
this.cpuLimit = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_CPU_LIMIT,
CPU_LIMIT);
this.cpuRequest = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_CPU_REQUEST,
DEFAULT_CPU_REQUEST);
this.memoryLimit = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_MEMORY_LIMIT,
MEMORY_LIMIT);
this.memoryRequest = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_MEMORY_REQUEST,
DEFAULT_MEMORY_REQUEST);
this.servicePort =
this.azkProps.getInt(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_PORT,
54343);
this.serviceTimeout =
this.azkProps
.getLong(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_CREATION_TIMEOUT_MS,
60000);
this.initMountPathPrefixForJobtypes =
this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_INIT_MOUNT_PATH_FOR_JOBTYPES,
DEFAULT_INIT_MOUNT_PATH_PREFIX_FOR_JOBTYPES);
this.appMountPathPrefixForJobtypes =
this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_MOUNT_PATH_FOR_JOBTYPES,
DEFAULT_APP_MOUNT_PATH_PREFIX_FOR_JOBTYPES);
this.nscdSocketHostPath =
this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_POD_NSCD_SOCKET_HOST_PATH,
DEFAULT_NSCD_SOCKET_HOST_PATH);
this.nscdSocketVolumeMountPath =
this.azkProps.getString(
ContainerizedDispatchManagerProperties.KUBERNETES_POD_NSCD_SOCKET_VOLUME_MOUNT_PATH,
DEFAULT_NSCD_SOCKET_VOLUME_MOUNT_PATH);
this.secretName = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_SECRET_NAME,
DEFAULT_SECRET_NAME);
this.secretVolume = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_SECRET_VOLUME,
DEFAULT_SECRET_VOLUME);
this.secretMountpath = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_FLOW_CONTAINER_SECRET_MOUNTPATH,
DEFAULT_SECRET_MOUNTPATH);
try {
// Path to the configuration file for Kubernetes which contains information about
// Kubernetes API Server and identity for authentication
final String kubeConfigPath = this.azkProps
.getString(ContainerizedDispatchManagerProperties.KUBERNETES_KUBE_CONFIG_PATH);
logger.info("Kube config path is : {}", kubeConfigPath);
this.client =
ClientBuilder.kubeconfig(KubeConfig.loadKubeConfig(
Files.newBufferedReader(Paths.get(kubeConfigPath), Charset.defaultCharset())))
.build();
this.coreV1Api = new CoreV1Api(this.client);
} catch (final IOException exception) {
logger.error("Unable to read kube config file: {}", exception.getMessage());
throw new ExecutorManagerException(exception);
}
// Add all the job types that are readily available as part of azkaban base image.
this.addIncludedJobTypes();
}
/**
   * Populate the included job types set with all the types that are readily available as part of
   * the azkaban base image.
*/
private void addIncludedJobTypes() {
INCLUDED_JOB_TYPES.add("hadoopJava");
INCLUDED_JOB_TYPES.add("hadoopShell");
INCLUDED_JOB_TYPES.add("hive");
INCLUDED_JOB_TYPES.add("java");
INCLUDED_JOB_TYPES.add("java2");
INCLUDED_JOB_TYPES.add("pig");
INCLUDED_JOB_TYPES.add("pigLi");
INCLUDED_JOB_TYPES.add("command");
INCLUDED_JOB_TYPES.add("javaprocess");
INCLUDED_JOB_TYPES.add("noop");
}
/**
   * Check if the job type is present in the included job types. If not, check if the job type
   * starts with any of the job types present in the included job type set. For example, a pig job
   * type can carry a version such as pigLi-0.11.1, which simply points to a different installation
   * of the pig job type. Hence, only the prefix, i.e. pigLi, which is the actual job type name, is
   * matched.
* @param jobType
* @return boolean
*/
private boolean isPresentInIncludedJobTypes(String jobType) {
if(INCLUDED_JOB_TYPES.contains(jobType)) {
return true;
} else {
return isStartWithIncludedJobTypes(jobType);
}
}
/**
   * Check if the job type starts with any of the job types present in the included job type
   * set. For example, a pig job type can carry a version such as pigLi-0.11.1, which simply points
   * to a different installation of the pig job type. Hence, only the prefix, i.e. pigLi, which is
   * the actual job type name, is matched.
* @param jobType
* @return boolean
*/
private boolean isStartWithIncludedJobTypes(String jobType) {
for(String includedJobType : INCLUDED_JOB_TYPES) {
if(jobType.toLowerCase().startsWith(includedJobType.toLowerCase())) {
return true;
}
}
return false;
}
/**
* Filter out the included job types from the given job types.
* @param jobTypes
* @return Set<String>
*/
private Set<String> filterIncludedJobTypes(Set<String> jobTypes) {
return jobTypes.stream()
.filter(jobType -> !isPresentInIncludedJobTypes(jobType))
.collect(Collectors.toSet());
}
/**
   * This method is used to create a container during dispatch of an execution. It will create a
   * pod for the flow execution. It will also create a service for the pod if the
   * azkaban.kubernetes.service.required property is set.
*
* @param executionId
* @throws ExecutorManagerException
*/
@Override
public void createContainer(final int executionId) throws ExecutorManagerException {
createPod(executionId);
if (isServiceRequired()) {
createService(executionId);
}
}
/**
* This method is used to delete container. It will delete pod for a flow execution. If the
 * service was created then it will also delete the service. This method can be called as part of
 * the cleanup process for containers in case they didn't shut down gracefully.
*
* @param executionId
* @throws ExecutorManagerException
*/
@Override
public void deleteContainer(final int executionId) throws ExecutorManagerException {
deletePod(executionId);
if (isServiceRequired()) {
deleteService(executionId);
}
}
/**
* Construct the flow override parameter (key) for image version.
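 * For example, assuming the IMAGE and VERSION constants resolve to "image" and "version", an
 * imageType of "spark" yields the key "image.spark.version".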
* @param imageType
* @return flow override param
*/
private String imageTypeOverrideParam(String imageType) {
return String.join(".", IMAGE, imageType, VERSION);
}
/**
* This method fetches the complete version set information (Map of jobs and their versions)
* required to run the flow.
*
* @param flowParams
* @param imageTypesUsedInFlow
* @return VersionSet
* @throws ExecutorManagerException
*/
@VisibleForTesting
VersionSet fetchVersionSet(final int executionId, Map<String, String> flowParams,
Set<String> imageTypesUsedInFlow) throws ExecutorManagerException {
VersionSet versionSet = null;
try {
if (flowParams != null &&
flowParams.containsKey(Constants.FlowParameters.FLOW_PARAM_VERSION_SET_ID)) {
int versionSetId = Integer.parseInt(flowParams
.get(Constants.FlowParameters.FLOW_PARAM_VERSION_SET_ID));
try {
versionSet = this.versionSetLoader.getVersionSetById(versionSetId).get();
/*
* Validate that all images part of the flow are included in the retrieved
* VersionSet. If there are images that were not part of the retrieved version
* set, then create a new VersionSet with a superset of all images.
*/
Set<String> imageVersionsNotFound = new TreeSet<>();
Map<String, VersionInfo> overlayMap = new HashMap<>();
for (String imageType : imageTypesUsedInFlow) {
if (flowParams.containsKey(imageTypeOverrideParam(imageType))) {
              // Fetches the user-overridden version from the database. This makes sure that the
              // overridden version exists/is registered in the Azkaban database. Hence, it follows
              // a fail-fast mechanism and throws an exception if the version does not exist for
              // the given image type.
overlayMap.put(imageType, this.imageRampupManager.getVersionInfo(imageType,
flowParams.get(imageTypeOverrideParam(imageType))));
} else if (!(isPresentInIncludedJobTypes(imageType) || versionSet.getVersion(imageType).isPresent())) {
logger.info("ExecId: {}, imageType: {} not found in versionSet {}",
executionId, imageType, versionSetId);
imageVersionsNotFound.add(imageType);
}
}
if (!(imageVersionsNotFound.isEmpty() && overlayMap.isEmpty())) {
// Populate a new Version Set
logger.info("ExecId: {}, Flow had more imageTypes than specified in versionSet {}. "
+ "Constructing a new one", executionId, versionSetId);
VersionSetBuilder versionSetBuilder = new VersionSetBuilder(this.versionSetLoader);
versionSetBuilder.addElements(versionSet.getImageToVersionMap());
              // The following is a safety check. In case getVersionByImageTypes below fails due to
              // an exception, we would otherwise end up with an incomplete/incorrect versionSet.
              // Setting it to null ensures it will be rebuilt from scratch in the following code block.
versionSet = null;
if (!imageVersionsNotFound.isEmpty()) {
versionSetBuilder.addElements(
this.imageRampupManager.getVersionByImageTypes(imageVersionsNotFound));
}
if (!overlayMap.isEmpty()) {
versionSetBuilder.addElements(overlayMap);
}
versionSet = versionSetBuilder.build();
}
} catch (Exception e) {
logger.error("ExecId: {}, Could not find version set id: {} as specified by flow params. "
+ "Will continue by creating a new one.", executionId, versionSetId);
}
}
if (versionSet == null) {
// Need to build a version set
// Filter all the job types available in azkaban base image from the input image types set
imageTypesUsedInFlow = this.filterIncludedJobTypes(imageTypesUsedInFlow);
Map<String, VersionInfo> versionMap =
imageRampupManager.getVersionByImageTypes(imageTypesUsedInFlow);
// Now we will check the flow params for any override versions provided and apply them
for (String imageType : imageTypesUsedInFlow) {
final String imageTypeVersionOverrideParam = imageTypeOverrideParam(imageType);
if (flowParams != null && flowParams.containsKey(imageTypeVersionOverrideParam)) {
            // Fetches the user-overridden version from the database. This makes sure that the
            // overridden version exists/is registered in the Azkaban database. Hence, it follows a
            // fail-fast mechanism and throws an exception if the version does not exist for the
            // given image type.
versionMap.put(imageType, this.imageRampupManager.getVersionInfo(imageType,
flowParams.get(imageTypeVersionOverrideParam)));
}
}
VersionSetBuilder versionSetBuilder = new VersionSetBuilder(this.versionSetLoader);
versionSet = versionSetBuilder.addElements(versionMap).build();
}
} catch (IOException e) {
logger.error("ExecId: {}, Exception in fetching the VersionSet. Error msg: {}",
executionId, e.getMessage());
throw new ExecutorManagerException(e);
}
return versionSet;
}
/**
* @param executionId
* @param versionSet
* @param jobTypes
* @return
* @throws ExecutorManagerException
*/
@VisibleForTesting
V1PodSpec createPodSpec(final int executionId, final VersionSet versionSet,
SortedSet<String> jobTypes)
throws ExecutorManagerException {
// Gets azkaban base image full path containing version.
final String azkabanBaseImageFullPath = getAzkabanBaseImageFullPath(versionSet);
// TODO: check if we need full path for config as well.
final String azkabanConfigVersion = getAzkabanConfigVersion(versionSet);
final AzKubernetesV1SpecBuilder v1SpecBuilder =
new AzKubernetesV1SpecBuilder(this.clusterName, Optional.empty())
.addFlowContainer(this.flowContainerName,
azkabanBaseImageFullPath, ImagePullPolicy.IF_NOT_PRESENT, azkabanConfigVersion)
.withResources(this.cpuLimit, this.cpuRequest, this.memoryLimit, this.memoryRequest);
// Add volume for nscd-socket
addNscdSocketInVolume(v1SpecBuilder);
Map<String, String> envVariables = new HashMap<>();
envVariables.put(ContainerizedDispatchManagerProperties.ENV_VERSION_SET_ID,
String.valueOf(versionSet.getVersionSetId()));
envVariables.put(ContainerizedDispatchManagerProperties.ENV_FLOW_EXECUTION_ID,
String.valueOf(executionId));
// Add env variables to spec builder
addEnvVariablesToSpecBuilder(v1SpecBuilder, envVariables);
// Create init container yaml file for each jobType
addInitContainerForAllJobTypes(executionId, jobTypes, v1SpecBuilder, versionSet);
// Add volume with secrets mounted
addSecretVolume(v1SpecBuilder);
return v1SpecBuilder.build();
}
/**
* Adding environment variables in pod spec builder.
*
* @param v1SpecBuilder
* @param envVariables
*/
private void addEnvVariablesToSpecBuilder(AzKubernetesV1SpecBuilder v1SpecBuilder,
Map<String, String> envVariables) {
envVariables.forEach((key, value) -> v1SpecBuilder.addEnvVarToFlowContainer(key, value));
}
/**
* This method is used to add volume for nscd socket.
*
* @param v1SpecBuilder
*/
private void addNscdSocketInVolume(AzKubernetesV1SpecBuilder v1SpecBuilder) {
v1SpecBuilder
.addHostPathVolume(NSCD_SOCKET_VOLUME_NAME, this.nscdSocketHostPath, HOST_PATH_TYPE,
this.nscdSocketVolumeMountPath);
}
/**
*
* @param executionId
* @param podSpec
* @return
*/
@VisibleForTesting
V1Pod createPodFromSpec(int executionId, V1PodSpec podSpec) {
final ImmutableMap<String, String> labels = getLabelsForPod();
final ImmutableMap<String, String> annotations = getAnnotationsForPod();
final V1Pod pod = new AzKubernetesV1PodBuilder(getPodName(executionId), this.namespace, podSpec)
.withPodLabels(labels)
.withPodAnnotations(annotations)
.build();
return pod;
}
/**
 * This method is used to create a pod.
 * 1. Fetch the jobTypes for the flow.
 * 2. Fetch the flow parameters for the version set and for each image type, if set.
 * 3. If a valid version set is provided then use the versions from it.
 * 4. If a valid version set is not provided then call the ramp-up manager API and get the image
 *    version for each image type.
 * 5. Add all the validation around a) whether the version set is valid or not, and b) if it is
 *    valid, whether the flow changed and a new jobType was introduced after the version set was
 *    created. If so, create a new version set using the versions mentioned in the version set and
 *    the ramp up for the new jobType.
 * 6. Create the pod spec using all the version information.
 * 7. Insert the version set into the execution_flows table for reference.
 * 8. Emit the version set as part of the flow lifecycle event.
*
* @param executionId
* @throws ExecutorManagerException
*/
private void createPod(final int executionId) throws ExecutorManagerException {
// Fetch execution flow from execution Id.
final ExecutableFlow flow = this.executorLoader.fetchExecutableFlow(executionId);
// Step 1: Fetch set of jobTypes for a flow from executionId
final TreeSet<String> jobTypes = getJobTypesForFlow(flow);
logger.info("ExecId: {}, Jobtypes for flow {} are: {}", executionId, flow.getFlowId(), jobTypes);
final Map<String, String> flowParam =
flow.getExecutionOptions().getFlowParameters();
if (flowParam != null && !flowParam.isEmpty()) {
logger.info("ExecId: {}, Flow Parameters are: {}", executionId, flowParam);
}
// Create all image types by adding azkaban base image, azkaban config and all job types for
// the flow.
final Set<String> allImageTypes = new TreeSet<>();
allImageTypes.add(AZKABAN_BASE_IMAGE);
allImageTypes.add(AZKABAN_CONFIG);
allImageTypes.addAll(jobTypes);
final VersionSet versionSet = fetchVersionSet(executionId, flowParam, allImageTypes);
final V1PodSpec podSpec = createPodSpec(executionId, versionSet, jobTypes);
final V1Pod pod = createPodFromSpec(executionId, podSpec);
String podSpecYaml = Yaml.dump(pod).trim();
logger.debug("ExecId: {}, Pod spec is {}", executionId, podSpecYaml);
// TODO: Add version set number and json in flow life cycle event so users can use this
// information
try {
this.coreV1Api.createNamespacedPod(this.namespace, pod, null, null, null);
logger.info("ExecId: {}, Dispatched pod for execution.", executionId);
} catch (ApiException e) {
logger.error("ExecId: {}, Unable to create Pod: {}", executionId, e.getResponseBody());
throw new ExecutorManagerException(e);
}
// Store version set id in execution_flows for execution_id
this.executorLoader.updateVersionSetId(executionId, versionSet.getVersionSetId());
}
/**
* TODO: Get azkaban base image version from version set.
*
* @return
*/
private String getAzkabanBaseImageFullPath(final VersionSet versionSet) {
return versionSet.getVersion(AZKABAN_BASE_IMAGE).get().pathWithVersion();
}
private String getAzkabanConfigVersion(final VersionSet versionSet) {
return versionSet.getVersion(AZKABAN_CONFIG).get().getVersion();
}
/**
* TODO: Add implementation to get labels for Pod.
*
* @return
*/
private ImmutableMap getLabelsForPod() {
return ImmutableMap.of("cluster", this.clusterName);
}
/**
* TODO: Add implementation to get annotations for Pod.
*
* @return
*/
private ImmutableMap getAnnotationsForPod() {
return ImmutableMap.of();
}
/**
* TODO: Check if we need to turn everything into lower case?
*
* @param executionId
* @param jobTypes
* @param v1SpecBuilder
* @param versionSet
* @throws ExecutorManagerException
*/
private void addInitContainerForAllJobTypes(final int executionId,
final Set<String> jobTypes, final AzKubernetesV1SpecBuilder v1SpecBuilder,
final VersionSet versionSet)
throws ExecutorManagerException {
for (String jobType: jobTypes) {
// Skip all the job types that are available in the azkaban base image and create init
// container for the remaining job types.
if(isPresentInIncludedJobTypes(jobType)) {
continue;
}
try {
String imageFullPath = versionSet.getVersion(jobType).get().pathWithVersion();
v1SpecBuilder.addJobType(jobType, imageFullPath, ImagePullPolicy.IF_NOT_PRESENT,
String.join("/", this.initMountPathPrefixForJobtypes, jobType),
String.join("/", this.appMountPathPrefixForJobtypes, jobType));
} catch (Exception e) {
throw new ExecutorManagerException("Did not find the version string for image type: " +
jobType + " in versionSet");
}
}
}
private void addSecretVolume(final AzKubernetesV1SpecBuilder v1SpecBuilder) {
v1SpecBuilder.addSecretVolume(secretVolume, secretName, secretMountpath);
}
/**
 * This method is used to get the jobTypes for a flow. It calls populateJobTypeForFlow, which
 * recursively traverses the DAG of the flow.
*
* @param flow Executable flow object
* @return
* @throws ExecutorManagerException
*/
public TreeSet<String> getJobTypesForFlow(final ExecutableFlow flow) {
final TreeSet<String> jobTypes = new TreeSet<>();
populateJobTypeForFlow(flow, jobTypes);
return jobTypes;
}
/**
* This method is used to populate jobTypes for ExecutableNode.
*
* @param node
* @param jobTypes
*/
private void populateJobTypeForFlow(final ExecutableNode node, Set<String> jobTypes) {
if (node instanceof ExecutableFlowBase) {
final ExecutableFlowBase base = (ExecutableFlowBase) node;
for (ExecutableNode subNode : base.getExecutableNodes()) {
populateJobTypeForFlow(subNode, jobTypes);
}
} else {
jobTypes.add(node.getType());
}
}
/**
* This method is used to create service for flow container for execution id.
*
* @param executionId
* @throws ExecutorManagerException
*/
private void createService(final int executionId) throws ExecutorManagerException {
try {
final AzKubernetesV1ServiceBuilder azKubernetesV1ServiceBuilder =
new AzKubernetesV1ServiceBuilder(
"v1Service.yaml");
final V1Service serviceObject = azKubernetesV1ServiceBuilder
.withExecId(String.valueOf(executionId))
.withServiceName(getServiceName(executionId))
.withNamespace(this.namespace)
.withApiVersion(SERVICE_API_VERSION_2)
.withKind(MAPPING)
.withPort(String.valueOf(this.servicePort))
.withTimeoutMs(String.valueOf(this.serviceTimeout))
.build();
this.coreV1Api.createNamespacedService(this.namespace, serviceObject, null, null, null);
logger.info("ExecId: {}, Service is created.", executionId);
} catch (final IOException e) {
logger.error("ExecId: {}, Unable to create service in Kubernetes. Msg: {}", executionId, e.getMessage());
throw new ExecutorManagerException(e);
} catch (final ApiException e) {
logger.error("ExecId: {}, Unable to create service in Kubernetes. Msg: {} ",
executionId, e.getResponseBody());
throw new ExecutorManagerException(e);
}
}
/**
* This method is used to check whether service should be created in Kubernetes for flow container
* pod or not.
*
* @return
*/
private boolean isServiceRequired() {
return this.azkProps
.getBoolean(ContainerizedDispatchManagerProperties.KUBERNETES_SERVICE_REQUIRED, false);
}
/**
 * This method is used to delete the pod in Kubernetes. It will terminate the pod.
*
* @param executionId
* @throws ExecutorManagerException
*/
private void deletePod(final int executionId) throws ExecutorManagerException {
try {
final String podName = getPodName(executionId);
this.coreV1Api.deleteNamespacedPod(podName, this.namespace, null, null,
null, null, null, new V1DeleteOptions());
logger.info("ExecId: {}, Action: Pod Deletion, Pod Name: {}", executionId, podName);
} catch (ApiException e) {
logger.error("ExecId: {}, Unable to delete Pod in Kubernetes: {}", executionId, e.getResponseBody());
throw new ExecutorManagerException(e);
}
}
/**
* This method is used to delete service in Kubernetes which is created for Pod.
*
* @param executionId
* @throws ExecutorManagerException
*/
public void deleteService(final int executionId) throws ExecutorManagerException {
final String serviceName = getServiceName(executionId);
try {
final V1Status deleteResult = this.coreV1Api.deleteNamespacedService(
serviceName,
this.namespace,
null,
null,
null,
null,
null,
new V1DeleteOptions());
logger.info("ExecId: {}, Action: Service Deletion, Service Name: {}, code: {}, message: {}",
executionId,
serviceName,
deleteResult.getCode(),
deleteResult.getMessage());
} catch (ApiException e) {
logger.error("ExecId: {}, Unable to delete service in Kubernetes: {}", executionId, e.getResponseBody());
throw new ExecutorManagerException(e);
}
}
/**
* This method is used to get service name. It will be created using service name prefix, azkaban
* cluster name and execution id.
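 * For example, with an (illustrative) service prefix "fc-svc", cluster name "cluster1" and
 * execution id 280, the resulting name would be "fc-svc-cluster1-280".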
*
* @param executionId
* @return
*/
private String getServiceName(final int executionId) {
return String.join("-", this.servicePrefix, this.clusterName, String.valueOf(executionId));
}
/**
* This method is used to get name of Pod based on naming convention. It will be created using pod
* name prefix, azkaban cluster name and execution id.
*
* @param executionId
* @return
*/
private String getPodName(final int executionId) {
return String.join("-", this.podPrefix, this.clusterName, String.valueOf(executionId));
}
}
| 1 | 21,205 | These configurations are currently overwritten in the props we provide in our config files. They need to be overridden there as well after you make this change. | azkaban-azkaban | java |
@@ -54,7 +54,12 @@ func PopulateMissingParameters(project *string, zone *string, region *string,
scratchBucketRegion := ""
if *scratchBucketGcsPath == "" {
- scratchBucketName, sbr, err := scratchBucketCreator.CreateScratchBucket(file, *project)
+ fallbackZone := *zone
+ if fallbackZone == "" && mgce.OnGCE() {
+ // try to get zone which Cloud Build is running in, ignoring error
+ fallbackZone, _ = mgce.Zone()
+ }
+ scratchBucketName, sbr, err := scratchBucketCreator.CreateScratchBucket(file, *project, fallbackZone)
scratchBucketRegion = sbr
if err != nil {
return err | 1 | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package param
import (
"context"
"fmt"
"log"
"strings"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/domain"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/storage"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"google.golang.org/api/option"
)
// GetProjectID gets project id from flag if exists; otherwise, try to retrieve from GCE metadata.
func GetProjectID(mgce domain.MetadataGCEInterface, projectFlag string) (string, error) {
if projectFlag == "" {
if !mgce.OnGCE() {
return "", fmt.Errorf("project cannot be determined because build is not running on GCE")
}
aProject, err := mgce.ProjectID()
if err != nil || aProject == "" {
return "", fmt.Errorf("project cannot be determined %v", err)
}
return aProject, nil
}
return projectFlag, nil
}
// PopulateMissingParameters populates missing params for import/export cli tools
func PopulateMissingParameters(project *string, zone *string, region *string,
scratchBucketGcsPath *string, file string, mgce domain.MetadataGCEInterface,
scratchBucketCreator domain.ScratchBucketCreatorInterface,
zoneRetriever domain.ZoneRetrieverInterface,
storageClient domain.StorageClientInterface) error {
if err := PopulateProjectIfMissing(mgce, project); err != nil {
return err
}
scratchBucketRegion := ""
if *scratchBucketGcsPath == "" {
scratchBucketName, sbr, err := scratchBucketCreator.CreateScratchBucket(file, *project)
scratchBucketRegion = sbr
if err != nil {
return err
}
*scratchBucketGcsPath = fmt.Sprintf("gs://%v/", scratchBucketName)
} else {
scratchBucketName, err := storage.GetBucketNameFromGCSPath(*scratchBucketGcsPath)
if err != nil {
return fmt.Errorf("invalid scratch bucket GCS path %v", scratchBucketGcsPath)
}
scratchBucketAttrs, err := storageClient.GetBucketAttrs(scratchBucketName)
if err == nil {
scratchBucketRegion = scratchBucketAttrs.Location
}
}
if *zone == "" {
if aZone, err := zoneRetriever.GetZone(scratchBucketRegion, *project); err == nil {
*zone = aZone
} else {
return err
}
}
if err := PopulateRegion(region, *zone); err != nil {
return err
}
return nil
}
// PopulateProjectIfMissing populates project id for cli tools
func PopulateProjectIfMissing(mgce domain.MetadataGCEInterface, projectFlag *string) error {
var err error
*projectFlag, err = GetProjectID(mgce, *projectFlag)
return err
}
// PopulateRegion populates region based on the value extracted from zone param
func PopulateRegion(region *string, zone string) error {
aRegion, err := GetRegion(zone)
if err != nil {
return err
}
*region = aRegion
return nil
}
// GetRegion extracts the region from a zone name
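// e.g. GetRegion("us-central1-b") returns "us-central1".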
func GetRegion(zone string) (string, error) {
if zone == "" {
return "", fmt.Errorf("zone is empty. Can't determine region")
}
zoneStrs := strings.Split(zone, "-")
if len(zoneStrs) < 2 {
return "", fmt.Errorf("%v is not a valid zone", zone)
}
return strings.Join(zoneStrs[:len(zoneStrs)-1], "-"), nil
}
// CreateComputeClient creates a new compute client
func CreateComputeClient(ctx *context.Context, oauth string, ce string) compute.Client {
computeOptions := []option.ClientOption{option.WithCredentialsFile(oauth)}
if ce != "" {
computeOptions = append(computeOptions, option.WithEndpoint(ce))
}
computeClient, err := compute.NewClient(*ctx, computeOptions...)
if err != nil {
log.Fatalf("compute client: %v", err)
}
return computeClient
}
| 1 | 9,066 | Why is error ignored here? | GoogleCloudPlatform-compute-image-tools | go |
@@ -20,6 +20,8 @@ const (
PartitionFlagGrub PartitionFlag = "grub"
// PartitionFlagBiosGrub indicates this is a bios grub boot partition
PartitionFlagBiosGrub PartitionFlag = "bios_grub"
+ // PartitionFlagBiosGrubLegacy indicates this is a bios grub boot partition. Needed to preserve legacy config behavior.
+ PartitionFlagBiosGrubLegacy PartitionFlag = "bios-grub"
// PartitionFlagBoot indicates this is a boot partition
PartitionFlagBoot PartitionFlag = "boot"
// PartitionFlagDeviceMapperRoot indicates this partition will be used for a device mapper root device | 1 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// Parser for the image builder's configuration schemas.
package configuration
import (
"encoding/json"
"fmt"
)
// PartitionFlag describes the features of a partition
type PartitionFlag string
const (
// PartitionFlagESP indicates this is the UEFI esp partition
PartitionFlagESP PartitionFlag = "esp"
// PartitionFlagGrub indicates this is a grub boot partition
PartitionFlagGrub PartitionFlag = "grub"
// PartitionFlagBiosGrub indicates this is a bios grub boot partition
PartitionFlagBiosGrub PartitionFlag = "bios_grub"
// PartitionFlagBoot indicates this is a boot partition
PartitionFlagBoot PartitionFlag = "boot"
// PartitionFlagDeviceMapperRoot indicates this partition will be used for a device mapper root device
PartitionFlagDeviceMapperRoot PartitionFlag = "dmroot"
)
func (p PartitionFlag) String() string {
return fmt.Sprint(string(p))
}
// GetValidPartitionFlags returns a list of all the supported
// partition flags
func (p *PartitionFlag) GetValidPartitionFlags() (types []PartitionFlag) {
return []PartitionFlag{
PartitionFlagESP,
PartitionFlagGrub,
PartitionFlagBiosGrub,
PartitionFlagBoot,
PartitionFlagDeviceMapperRoot,
}
}
// IsValid returns an error if the PartitionFlag is not valid
func (p *PartitionFlag) IsValid() (err error) {
for _, valid := range p.GetValidPartitionFlags() {
if *p == valid {
return
}
}
return fmt.Errorf("invalid value for Flag (%s)", p)
}
// UnmarshalJSON unmarshals a PartitionFlag entry
func (p *PartitionFlag) UnmarshalJSON(b []byte) (err error) {
// Use an intermediate type which will use the default JSON unmarshal implementation
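	// (the intermediate type has no UnmarshalJSON method of its own, so unmarshalling it does not recurse back into this method)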
type IntermediateTypePartitionFlag PartitionFlag
err = json.Unmarshal(b, (*IntermediateTypePartitionFlag)(p))
if err != nil {
return fmt.Errorf("failed to parse [Flag]: %w", err)
}
// Now validate the resulting unmarshaled object
err = p.IsValid()
if err != nil {
return fmt.Errorf("failed to parse [Flag]: %w", err)
}
return
}
| 1 | 14,053 | >PartitionFlagBiosGrubLegacy PartitionFlag = "bios-grub" [](start = 1, length = 55) I presume this is to fix a breaking change...and it's too late to fix the breaking change (since it already escaped into the world?) | microsoft-CBL-Mariner | go |
@@ -159,7 +159,7 @@ public class ScanSummary {
removeTimeFilters(filters, Expressions.rewriteNot(scan.filter()));
Expression rowFilter = joinFilters(filters);
- Iterable<ManifestFile> manifests = table.currentSnapshot().manifests();
+ Iterable<ManifestFile> manifests = table.currentSnapshot().dataManifests();
boolean filterByTimestamp = !timeFilters.isEmpty();
Set<Long> snapshotsInTimeRange = Sets.newHashSet(); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.io.IOException;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.function.Function;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.expressions.And;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expression.Operation;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.expressions.Literal;
import org.apache.iceberg.expressions.NamedReference;
import org.apache.iceberg.expressions.UnboundPredicate;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.relocated.com.google.common.base.Joiner;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.Comparators;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.Pair;
public class ScanSummary {
private ScanSummary() {
}
private static final ImmutableList<String> SCAN_SUMMARY_COLUMNS = ImmutableList.of(
"partition", "record_count", "file_size_in_bytes");
/**
* Create a scan summary builder for a table scan.
*
* @param scan a TableScan
* @return a scan summary builder
*/
public static ScanSummary.Builder of(TableScan scan) {
return new Builder(scan);
}
public static class Builder {
private static final Set<String> TIMESTAMP_NAMES = Sets.newHashSet(
"dateCreated", "lastUpdated");
private final TableScan scan;
private final Table table;
private final TableOperations ops;
private final Map<Long, Long> snapshotTimestamps;
private int limit = Integer.MAX_VALUE;
private boolean throwIfLimited = false;
private List<UnboundPredicate<Long>> timeFilters = Lists.newArrayList();
public Builder(TableScan scan) {
this.scan = scan;
this.table = scan.table();
this.ops = ((HasTableOperations) table).operations();
ImmutableMap.Builder<Long, Long> builder = ImmutableMap.builder();
for (Snapshot snap : table.snapshots()) {
builder.put(snap.snapshotId(), snap.timestampMillis());
}
this.snapshotTimestamps = builder.build();
}
private void addTimestampFilter(UnboundPredicate<Long> filter) {
throwIfLimited(); // ensure all partitions can be returned
timeFilters.add(filter);
}
public Builder after(String timestamp) {
Literal<Long> tsLiteral = Literal.of(timestamp).to(Types.TimestampType.withoutZone());
return after(tsLiteral.value() / 1000);
}
public Builder after(long timestampMillis) {
addTimestampFilter(Expressions.greaterThanOrEqual("timestamp_ms", timestampMillis));
return this;
}
public Builder before(String timestamp) {
Literal<Long> tsLiteral = Literal.of(timestamp).to(Types.TimestampType.withoutZone());
return before(tsLiteral.value() / 1000);
}
public Builder before(long timestampMillis) {
addTimestampFilter(Expressions.lessThanOrEqual("timestamp_ms", timestampMillis));
return this;
}
public Builder throwIfLimited() {
this.throwIfLimited = true;
return this;
}
public Builder limit(int numPartitions) {
this.limit = numPartitions;
return this;
}
private void removeTimeFilters(List<Expression> expressions, Expression expression) {
if (expression.op() == Operation.AND) {
And and = (And) expression;
removeTimeFilters(expressions, and.left());
removeTimeFilters(expressions, and.right());
return;
} else if (expression instanceof UnboundPredicate) {
UnboundPredicate pred = (UnboundPredicate) expression;
if (pred.term() instanceof NamedReference) {
NamedReference<?> ref = (NamedReference<?>) pred.term();
Literal<?> lit = pred.literal();
if (TIMESTAMP_NAMES.contains(ref.name())) {
Literal<Long> tsLiteral = lit.to(Types.TimestampType.withoutZone());
long millis = toMillis(tsLiteral.value());
addTimestampFilter(Expressions.predicate(pred.op(), "timestamp_ms", millis));
return;
}
}
}
expressions.add(expression);
}
/**
* Summarizes a table scan as a map of partition key to metrics for that partition.
*
* @return a map from partition key to metrics for that partition.
*/
public Map<String, PartitionMetrics> build() {
if (table.currentSnapshot() == null) {
return ImmutableMap.of(); // no snapshots, so there are no partitions
}
List<Expression> filters = Lists.newArrayList();
removeTimeFilters(filters, Expressions.rewriteNot(scan.filter()));
Expression rowFilter = joinFilters(filters);
Iterable<ManifestFile> manifests = table.currentSnapshot().manifests();
boolean filterByTimestamp = !timeFilters.isEmpty();
Set<Long> snapshotsInTimeRange = Sets.newHashSet();
if (filterByTimestamp) {
Pair<Long, Long> range = timestampRange(timeFilters);
long minTimestamp = range.first();
long maxTimestamp = range.second();
Snapshot oldestSnapshot = table.currentSnapshot();
for (Map.Entry<Long, Long> entry : snapshotTimestamps.entrySet()) {
long snapshotId = entry.getKey();
long timestamp = entry.getValue();
if (timestamp < oldestSnapshot.timestampMillis()) {
oldestSnapshot = ops.current().snapshot(snapshotId);
}
if (timestamp >= minTimestamp && timestamp <= maxTimestamp) {
snapshotsInTimeRange.add(snapshotId);
}
}
// if oldest known snapshot is in the range, then there may be an expired snapshot that has
// been removed that matched the range. because the timestamp of that snapshot is unknown,
// it can't be included in the results and the results are not reliable.
if (snapshotsInTimeRange.contains(oldestSnapshot.snapshotId()) &&
minTimestamp < oldestSnapshot.timestampMillis()) {
throw new IllegalArgumentException(
"Cannot satisfy time filters: time range may include expired snapshots");
}
        // filter down to the set of manifest files that were added after the start of the
// time range. manifests after the end of the time range must be included because
// compaction may create a manifest after the time range that includes files added in the
// range.
manifests = Iterables.filter(manifests, manifest -> {
if (manifest.snapshotId() == null) {
return true; // can't tell when the manifest was written, so it may contain matches
}
Long timestamp = snapshotTimestamps.get(manifest.snapshotId());
// if the timestamp is null, then its snapshot has expired. the check for the oldest
// snapshot ensures that all expired snapshots are not in the time range.
return timestamp != null && timestamp >= minTimestamp;
});
}
return computeTopPartitionMetrics(rowFilter, manifests, filterByTimestamp, snapshotsInTimeRange);
}
private Map<String, PartitionMetrics> computeTopPartitionMetrics(
Expression rowFilter,
Iterable<ManifestFile> manifests,
boolean filterByTimestamp,
Set<Long> snapshotsInTimeRange) {
TopN<String, PartitionMetrics> topN = new TopN<>(
limit, throwIfLimited, Comparators.charSequences());
try (CloseableIterable<ManifestEntry<DataFile>> entries = new ManifestGroup(ops.io(), manifests)
.specsById(ops.current().specsById())
.filterData(rowFilter)
.ignoreDeleted()
.select(SCAN_SUMMARY_COLUMNS)
.entries()) {
PartitionSpec spec = table.spec();
for (ManifestEntry<?> entry : entries) {
Long timestamp = snapshotTimestamps.get(entry.snapshotId());
// if filtering, skip timestamps that are outside the range
if (filterByTimestamp && !snapshotsInTimeRange.contains(entry.snapshotId())) {
continue;
}
String partition = spec.partitionToPath(entry.file().partition());
topN.update(partition, metrics -> (metrics == null ? new PartitionMetrics() : metrics)
.updateFromFile(entry.file(), timestamp));
}
} catch (IOException e) {
throw new RuntimeIOException(e);
}
return topN.get();
}
}
public static class PartitionMetrics {
private int fileCount = 0;
private long recordCount = 0L;
private long totalSize = 0L;
private Long dataTimestampMillis = null;
public int fileCount() {
return fileCount;
}
public long recordCount() {
return recordCount;
}
public long totalSize() {
return totalSize;
}
public Long dataTimestampMillis() {
return dataTimestampMillis;
}
PartitionMetrics updateFromCounts(int numFiles, long filesRecordCount, long filesSize,
Long timestampMillis) {
this.fileCount += numFiles;
this.recordCount += filesRecordCount;
this.totalSize += filesSize;
if (timestampMillis != null && (dataTimestampMillis == null || dataTimestampMillis < timestampMillis)) {
this.dataTimestampMillis = timestampMillis;
}
return this;
}
private PartitionMetrics updateFromFile(ContentFile<?> file, Long timestampMillis) {
this.fileCount += 1;
this.recordCount += file.recordCount();
this.totalSize += file.fileSizeInBytes();
if (timestampMillis != null &&
(dataTimestampMillis == null || dataTimestampMillis < timestampMillis)) {
this.dataTimestampMillis = timestampMillis;
}
return this;
}
@Override
public String toString() {
String dataTimestamp = dataTimestampMillis != null ?
new Date(dataTimestampMillis).toString() : null;
return "PartitionMetrics(fileCount=" + fileCount +
", recordCount=" + recordCount +
", totalSize=" + totalSize +
", dataTimestamp=" + dataTimestamp + ")";
}
}
private static class TopN<K, V> {
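    // Keeps at most maxSize entries with the smallest keys (per keyComparator). The smallest
    // evicted key is remembered as a cut-off so later updates for keys at or beyond it can be
    // skipped cheaply; if throwIfLimited is set, exceeding maxSize throws instead of evicting.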
private final int maxSize;
private final boolean throwIfLimited;
private final SortedMap<K, V> map;
private final Comparator<? super K> keyComparator;
private K cut = null;
TopN(int maxSize, boolean throwIfLimited, Comparator<? super K> keyComparator) {
this.maxSize = maxSize;
this.throwIfLimited = throwIfLimited;
this.map = Maps.newTreeMap(keyComparator);
this.keyComparator = keyComparator;
}
public void update(K key, Function<V, V> updateFunc) {
// if there is a cut and it comes before the given key, do nothing
if (cut != null && keyComparator.compare(cut, key) <= 0) {
return;
}
// call the update function and add the result to the map
map.put(key, updateFunc.apply(map.get(key)));
// enforce the size constraint and update the cut if some keys are excluded
while (map.size() > maxSize) {
if (throwIfLimited) {
throw new IllegalStateException(
String.format("Too many matching keys: more than %d", maxSize));
}
this.cut = map.lastKey();
map.remove(cut);
}
}
public Map<K, V> get() {
return ImmutableMap.copyOf(map);
}
}
static Expression joinFilters(List<Expression> expressions) {
Expression result = Expressions.alwaysTrue();
for (Expression expression : expressions) {
result = Expressions.and(result, expression);
}
return result;
}
static long toMillis(long timestamp) {
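    // Heuristically interpret the value as seconds, milliseconds, or microseconds based on its
    // magnitude, and normalize it to milliseconds.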
if (timestamp < 10000000000L) {
// in seconds
return timestamp * 1000;
} else if (timestamp < 10000000000000L) {
// in millis
return timestamp;
}
// in micros
return timestamp / 1000;
}
static Pair<Long, Long> timestampRange(List<UnboundPredicate<Long>> timeFilters) {
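    // Derive the tightest [min, max] millisecond range that satisfies all of the time predicates.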
// evaluation is inclusive
long minTimestamp = Long.MIN_VALUE;
long maxTimestamp = Long.MAX_VALUE;
for (UnboundPredicate<Long> pred : timeFilters) {
long value = pred.literal().value();
switch (pred.op()) {
case LT:
if (value - 1 < maxTimestamp) {
maxTimestamp = value - 1;
}
break;
case LT_EQ:
if (value < maxTimestamp) {
maxTimestamp = value;
}
break;
case GT:
if (value + 1 > minTimestamp) {
minTimestamp = value + 1;
}
break;
case GT_EQ:
if (value > minTimestamp) {
minTimestamp = value;
}
break;
case EQ:
if (value < maxTimestamp) {
maxTimestamp = value;
}
if (value > minTimestamp) {
minTimestamp = value;
}
break;
default:
throw new UnsupportedOperationException(
"Cannot filter timestamps using predicate: " + pred);
}
}
if (maxTimestamp < minTimestamp) {
throw new IllegalArgumentException(
"No timestamps can match filters: " + Joiner.on(", ").join(timeFilters));
}
return Pair.of(minTimestamp, maxTimestamp);
}
}
| 1 | 20,257 | Is this explicitly ignoring the effect of deleted rows on partition metrics or is it just that you are short circuiting any delete files (as we can't use them anyways) | apache-iceberg | java |
@@ -149,6 +149,12 @@ namespace Microsoft.DotNet.Execute
private string BuildParametersForCommand(Dictionary<string, string> commandParameters, string toolName)
{
string commandSetting = string.Empty;
+
+ if (Tools.ContainsKey(toolName))
+ {
+ commandSetting = Tools[toolName].osSpecific[Os]["defaultParameters"];
+ }
+
foreach (KeyValuePair<string, string> parameters in commandParameters)
{
if (!parameters.Key.Equals("toolName") && !string.IsNullOrEmpty(parameters.Value)) | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.IO;
using System.Collections.Generic;
using System.Text;
namespace Microsoft.DotNet.Execute
{
public class Setup
{
public Dictionary<string, Setting> Settings { get; set; }
public Dictionary<string, Command> Commands { get; set; }
public Dictionary<string, Tool> Tools { get; set; }
public Dictionary<string, string> SettingParameters { get; set; }
public string Os { get; set; }
public string ConfigurationFilePath { get; set; }
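        // Expands ${memberName} references in a setting value via SettingValueProvider,
        // e.g. "bin/${OSName}/run" could become "bin/Windows_NT/run" (the expanded value here is illustrative).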
private string ParseSettingValue(string inputValue)
{
string value = string.Empty;
int length = inputValue.Length;
for (int i = 0; i < length; i++)
{
if (i != length - 1 && inputValue[i] == '$')
{
if (inputValue[i + 1] == '{')
{
int j;
string memberName = string.Empty;
                    for (j = i + 2; j < length && inputValue[j] != '}'; j++)
memberName += inputValue[j];
// The string is not of format ${}, just add the chars to the value.
if (j == length)
value += "${" + memberName;
else
value += SettingValueProvider.Get(memberName);
// Put i to j counter.
i = j;
}
else
{
// If next char is not { then add $ to the value.
value += inputValue[i];
}
}
else
{
value += inputValue[i];
}
}
return value;
}
private string FindSettingValue(string valueToFind)
{
Setting value;
if (Settings.TryGetValue(valueToFind, out value))
{
return ParseSettingValue(value.DefaultValue);
}
return null;
}
private string FindSettingType(string valueToFind)
{
Setting value;
if (Settings.TryGetValue(valueToFind, out value))
{
return value.ValueType;
}
return null;
}
public void prepareValues(string os, Dictionary<string, string> parameters, string configFile)
{
SettingParameters = new Dictionary<string, string>(parameters);
Os = os;
ConfigurationFilePath = configFile;
}
public int ExecuteCommand(string commandSelectedByUser, List<string> parametersSelectedByUser)
{
CompleteCommand commandToRun = BuildCommand(commandSelectedByUser, parametersSelectedByUser);
if (commandToRun != null)
{
Console.ForegroundColor = ConsoleColor.DarkYellow;
Console.WriteLine("Running: {0} {1}", commandToRun.ToolCommand, commandToRun.ParametersCommand);
Console.ResetColor();
int result = RunProcess.ExecuteProcess(commandToRun.ToolCommand, commandToRun.ParametersCommand);
if (result == 0)
{
Console.ForegroundColor = ConsoleColor.Green;
Console.WriteLine("Build Succeeded.");
}
else
{
Console.ForegroundColor = ConsoleColor.Red;
Console.WriteLine("Build Failed.");
}
Console.ResetColor();
return result;
}
return 1;
}
private CompleteCommand BuildCommand(string commandSelectedByUser, List<string> parametersSelectedByUser, Dictionary<string, string> parameters = null)
{
Command commandToExecute;
if (!Commands.TryGetValue(commandSelectedByUser, out commandToExecute))
{
Console.Error.WriteLine("Error: The command {0} is not specified in the Json file.", commandSelectedByUser);
return null;
}
string commandTool = GetTool(commandToExecute, Os, ConfigurationFilePath, parametersSelectedByUser);
if (string.IsNullOrEmpty(commandTool))
{
return null;
}
if (parameters == null)
{
if (BuildRequiredValueSettingsForCommand(commandToExecute, parametersSelectedByUser, SettingParameters) &&
BuildDefaultValueSettingsForCommand(commandToExecute, SettingParameters) &&
ValidExtraParametersForCommand(SettingParameters["ExtraParameters"], SettingParameters))
{
string commandParameters = BuildParametersForCommand(SettingParameters, SettingParameters["toolName"]);
CompleteCommand completeCommand = new CompleteCommand(commandTool, commandParameters);
return completeCommand;
}
return null;
}
else
{
string commandParameters = BuildParametersForCommand(parameters, SettingParameters["toolName"]);
CompleteCommand completeCommand = new CompleteCommand(commandTool, commandParameters);
return completeCommand;
}
}
private string BuildParametersForCommand(Dictionary<string, string> commandParameters, string toolName)
{
string commandSetting = string.Empty;
foreach (KeyValuePair<string, string> parameters in commandParameters)
{
if (!parameters.Key.Equals("toolName") && !string.IsNullOrEmpty(parameters.Value))
{
string value = parameters.Value.Equals("default") ? FindSettingValue(parameters.Key) : ParseSettingValue(parameters.Value);
commandSetting += string.Format(" {0}", FormatSetting(parameters.Key, value, FindSettingType(parameters.Key), toolName));
}
}
return commandSetting;
}
private bool BuildRequiredValueSettingsForCommand(Command commandToExecute, List<string> requiredSettings, Dictionary<string, string> commandValues)
{
foreach (string reqSetting in requiredSettings)
{
foreach (KeyValuePair<string, string> sett in commandToExecute.Alias[reqSetting].Settings)
{
string value = sett.Value;
string currentValue;
if (commandValues.TryGetValue(sett.Key, out currentValue))
{
if (string.IsNullOrEmpty(currentValue) || currentValue.Equals("default"))
{
commandValues[sett.Key] = value;
}
else if (!value.Equals("default") && !value.Equals(currentValue))
{
Console.Error.WriteLine("Error: The value for setting {0} can't be overwriten.", sett.Key);
return false;
}
}
else if (!sett.Key.Equals("toolName"))
{
Console.Error.WriteLine("Error: The setting {0} is not specified in the Json file.", sett.Key);
return false;
}
}
}
return true;
}
private bool BuildDefaultValueSettingsForCommand(Command commandToExecute, Dictionary<string, string> commandValues)
{
foreach (KeyValuePair<string, string> optSetting in commandToExecute.DefaultValues.Settings)
{
string currentValue;
if (commandValues.TryGetValue(optSetting.Key, out currentValue))
{
if (string.IsNullOrEmpty(currentValue))
{
commandValues[optSetting.Key] = optSetting.Value;
}
}
else
{
Console.Error.WriteLine("Error: The setting {0} is not specified in the Json file.", optSetting.Key);
return false;
}
}
return true;
}
private bool ValidExtraParametersForCommand(string extraParameters, Dictionary<string, string> commandValues)
{
int namePos, valuePos;
string tempParam, name, value;
if (string.IsNullOrEmpty(extraParameters))
{
return true;
}
string[] extraA = extraParameters.Split(' ');
foreach (string param in extraA)
{
namePos = 0;
valuePos = param.Length;
tempParam = param;
namePos = param.IndexOf(":");
if (namePos != -1)
{
tempParam = param.Substring(namePos + 1);
}
valuePos = tempParam.IndexOf("=");
if (valuePos != -1)
{
name = tempParam.Substring(0, valuePos);
value = tempParam.Substring(valuePos + 1);
}
else
{
name = tempParam;
value = string.Empty;
}
string paramValue;
if (commandValues.TryGetValue(name, out paramValue) && !string.IsNullOrEmpty(paramValue) && !paramValue.Equals("default") && !value.Equals(paramValue))
{
Console.Error.WriteLine("Error: The value for setting {0} can't be overwriten.", name);
return false;
}
}
return true;
}
private string GetTool(Command commandToExecute, string os, string configPath, List<string> parametersSelectedByUser)
{
string toolname = commandToExecute.DefaultValues.ToolName;
string project = GetProject(commandToExecute, parametersSelectedByUser);
if (Tools.ContainsKey(toolname))
{
SettingParameters["toolName"] = toolname;
if (toolname.Equals("msbuild"))
{
return Path.GetFullPath(Path.Combine(configPath, os.Equals("windows") ? Tools[toolname].Run["windows"] : Tools[toolname].Run["unix"]));
}
else if (toolname.Equals("terminal"))
{
string extension = os.Equals("windows") ? Tools[toolname].Run["windows"] : Tools[toolname].Run["unix"];
return Path.GetFullPath(Path.Combine(configPath, string.Format("{0}.{1}", project, extension)));
}
}
Console.Error.WriteLine("Error: The process {0} is not specified in the Json file.", toolname);
return string.Empty;
}
private string GetProject(Command commandToExecute, List<string> parametersSelectedByUser)
{
string project = string.Empty;
bool moreThanOneProject = false;
foreach (string param in parametersSelectedByUser)
{
if (commandToExecute.Alias[param].Settings.TryGetValue("Project", out project))
{
if (moreThanOneProject)
{
Console.Error.WriteLine("Error: There can only be one project execution per command.");
return string.Empty;
}
moreThanOneProject = true;
}
}
if (string.IsNullOrEmpty(project))
{
project = commandToExecute.DefaultValues.Project;
}
return project;
}
public string FormatSetting(string option, string value, string type, string toolName)
{
string commandOption = null;
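            // The tool's ValueTypes entry acts as a template with {name}/{value} placeholders,
            // e.g. an (illustrative) msbuild template "/p:{name}={value}" would yield "/p:Configuration=Debug".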
if (type.Equals("passThrough"))
{
commandOption = string.Format(" {0}", toolName.Equals("console") ? "" : value);
}
else
{
Tool toolFormat;
if (Tools.TryGetValue(toolName, out toolFormat) && !string.IsNullOrEmpty(type))
{
if (toolFormat.ValueTypes.TryGetValue(type, out commandOption))
{
commandOption = commandOption.Replace("{name}", option).Replace("{value}", value);
}
else
{
Console.Error.WriteLine("The type \"{0}\" is not defined as a Value Type of the tool \"{1}\". Parameter ignored", type, toolName);
return null;
}
}
}
return commandOption;
}
public string GetHelpCommand(string commandName, string alias = null)
{
Command commandToPrint;
if (Commands.TryGetValue(commandName, out commandToPrint))
{
StringBuilder sb = new StringBuilder();
Dictionary<string, string> commandParametersToPrint = new Dictionary<string, string>();
sb.AppendLine().Append("Settings: ").AppendLine();
sb.Append(GetHelpAlias(commandToPrint.Alias[alias].Settings, commandParametersToPrint));
//sb.AppendLine().Append("Default Settings for action (values can be overwritten): ").AppendLine();
sb.Append(GetHelpAlias(commandToPrint.DefaultValues.Settings, commandParametersToPrint));
CompleteCommand completeCommand = BuildCommand(commandName, new List<string>(alias.Split(' ')), commandParametersToPrint);
sb.AppendLine().Append("It will run: ").AppendLine();
sb.Append(string.Format("{0} {1}", completeCommand.ToolCommand, completeCommand.ParametersCommand));
return sb.ToString();
}
return null;
}
private string GetHelpAlias(Dictionary<string, string> settings, Dictionary<string, string> commandParametersToPrint)
{
StringBuilder sb = new StringBuilder();
foreach (KeyValuePair<string, string> setting in settings)
{
string value = setting.Value.Equals("default") ? FindSettingValue(setting.Key) : setting.Value;
sb.Append(string.Format(" {0} = {2}", setting.Key, FindSettingType(setting.Key), value)).AppendLine();
commandParametersToPrint[setting.Key] = string.IsNullOrEmpty(value) ? "True" : value;
}
return sb.ToString();
}
private class CompleteCommand
{
public string ToolCommand { get { return _toolCommand; } }
public string ParametersCommand { get { return _parametersCommand; } }
internal CompleteCommand(string tool, string parameters)
{
_toolCommand = tool;
_parametersCommand = parameters;
}
private string _toolCommand;
private string _parametersCommand;
}
}
public class AliasPerCommand
{
public string Description { get; set; }
public Dictionary<string, string> Settings { get; set; }
}
public class DefaultValuesPerCommand
{
public string Project { get; set; }
public string ToolName { get; set; }
public Dictionary<string, string> Settings { get; set; }
}
public class Command
{
public Dictionary<string, AliasPerCommand> Alias { get; set; }
public DefaultValuesPerCommand DefaultValues { get; set; }
}
public class Tool
{
public Dictionary<string, string> Run { get; set; }
public Dictionary<string, string> ValueTypes { get; set; }
}
public class Setting
{
public string Description { get; set; }
public string ValueType { get; set; }
public List<string> Values { get; set; }
public string DefaultValue { get; set; }
}
}
| 1 | 10,608 | I would also check whether or not this "defaultParameters" is null or empty to allow for people to omit it from the config file. | dotnet-buildtools | .cs |
@@ -170,5 +170,5 @@ func (event *Event) IsToEdge() bool {
// GetContent dumps the content to string
func (event *Event) GetContent() string {
- return fmt.Sprintf("%v", event.Content)
+ return fmt.Sprintf("%s", event.Content)
} | 1 | package model
import (
// Mapping value of json to struct member
_ "encoding/json"
"fmt"
"strings"
"github.com/kubeedge/beehive/pkg/common/log"
"github.com/kubeedge/beehive/pkg/core/model"
)
// constants for resource types
const (
ResNode = "node"
ResMember = "membership"
ResTwin = "twin"
ResAuth = "auth_info"
ResDevice = "device"
)
// constants for resource operations
const (
OpGet = "get"
OpResult = "get_result"
OpList = "list"
OpDetail = "detail"
OpDelta = "delta"
OpDoc = "document"
OpUpdate = "updated"
OpInsert = "insert"
OpDelete = "deleted"
OpConnect = "connected"
OpDisConnect = "disconnected"
OpKeepalive = "keepalive"
)
// constants for message group
const (
GpResource = "resource"
)
// constants for message source
const (
SrcCloudHub = "cloudhub"
SrcController = "controller"
SrcManager = "edgemgr"
)
// HubInfo saves identifier information for edge hub
type HubInfo struct {
ProjectID string
NodeID string
}
// UserGroupInfo struct
type UserGroupInfo struct {
Resource string `json:"resource"`
Operation string `json:"operation"`
}
// Event represents message communicated between cloud hub and edge hub
type Event struct {
Group string `json:"msg_group"`
Source string `json:"source"`
UserGroup UserGroupInfo `json:"user_group"`
ID string `json:"msg_id"`
ParentID string `json:"parent_msg_id"`
Timestamp int64 `json:"timestamp"`
Content interface{} `json:"content"`
}
// EventToMessage converts an event to a model message
func EventToMessage(event *Event) model.Message {
var msg model.Message
msg.BuildHeader(event.ID, event.ParentID, event.Timestamp)
msg.BuildRouter(event.Source, event.Group, event.UserGroup.Resource, event.UserGroup.Operation)
msg.FillBody(event.Content)
return msg
}
// MessageToEvent converts a model message to an event
func MessageToEvent(msg *model.Message) Event {
var event Event
event.ID = msg.GetID()
event.ParentID = msg.GetParentID()
event.Timestamp = msg.GetTimestamp()
event.Source = msg.GetSource()
event.Group = msg.GetGroup()
event.Content = msg.GetContent()
event.UserGroup = UserGroupInfo{
Resource: msg.GetResource(),
Operation: msg.GetOperation(),
}
return event
}
// NewResource constructs a resource field using resource type and ID
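// For example, NewResource(ResMember, "", &HubInfo{NodeID: "edge-node-1"}) returns "node/edge-node-1/membership".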
func NewResource(resType, resID string, info *HubInfo) string {
var prefix string
if info != nil {
prefix = fmt.Sprintf("%s/%s/", "node", info.NodeID)
}
if resID == "" {
return fmt.Sprintf("%s%s", prefix, resType)
}
return fmt.Sprintf("%s%s/%s", prefix, resType, resID)
}
// IsNodeStopped indicates if the node is stopped or running
func (event *Event) IsNodeStopped() bool {
tokens := strings.Split(event.UserGroup.Resource, "/")
if len(tokens) != 2 || tokens[0] != ResNode {
return false
}
if event.UserGroup.Operation == OpDelete {
return true
}
if event.UserGroup.Operation != OpUpdate || event.Content == nil {
return false
}
body, ok := event.Content.(map[string]interface{})
if !ok {
log.LOGGER.Errorf("fail to decode node update message: %s, type is %T", event.GetContent(), event.Content)
// it can't be determined if the node has stopped
return false
}
// trust struct of json body
action, ok := body["action"]
if !ok || action.(string) != "stop" {
return false
}
return true
}
// IsFromEdge judges if the event is sent from the edge
func (event *Event) IsFromEdge() bool {
return true
}
// IsToEdge judges if the event should be sent to the edge
func (event *Event) IsToEdge() bool {
if event.Source != SrcManager {
return true
}
resource := event.UserGroup.Resource
if strings.HasPrefix(resource, ResNode) {
tokens := strings.Split(resource, "/")
if len(tokens) >= 3 {
resource = strings.Join(tokens[2:], "/")
}
}
// apply special check for edge manager
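	// these resource/operation pairs from the edge manager are not forwarded to the edge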
resOpMap := map[string][]string{
ResMember: {OpGet},
ResTwin: {OpDelta, OpDoc, OpGet},
ResAuth: {OpGet},
ResNode: {OpDelete},
}
for res, ops := range resOpMap {
for _, op := range ops {
if event.UserGroup.Operation == op && strings.Contains(resource, res) {
return false
}
}
}
return true
}
// GetContent dumps the content to string
func (event *Event) GetContent() string {
return fmt.Sprintf("%v", event.Content)
}
| 1 | 10,438 | I think event.Content is of type interface{}, so we may need %v here? | kubeedge-kubeedge | go |
@@ -17,10 +17,12 @@
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+"""Legacy BibEdit."""
+
import warnings
-from invenio.utils.deprecation import RemovedInInvenio22Warning
+from invenio.utils.deprecation import RemovedInInvenio23Warning
-warnings.warn("BibEdit will be removed in 2.2. Please check "
+warnings.warn("Legacy BibEdit will be removed in 2.3. Please check "
"'invenio.modules.editor' module.",
- RemovedInInvenio22Warning)
+ RemovedInInvenio23Warning) | 1 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import warnings
from invenio.utils.deprecation import RemovedInInvenio22Warning
warnings.warn("BibEdit will be removed in 2.2. Please check "
"'invenio.modules.editor' module.",
RemovedInInvenio22Warning)
| 1 | 16,020 | Note: `editor` depends on legacy `BibEdit` | inveniosoftware-invenio | py |
@@ -39,5 +39,5 @@ class InputDevice(object):
def clear_actions(self):
self.actions = []
- def create_pause(self, duraton=0):
+ def create_pause(self, duration=0):
pass | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import uuid
class InputDevice(object):
"""
Describes the input device being used for the action.
"""
def __init__(self, name=None):
if name is None:
self.name = uuid.uuid4()
else:
self.name = name
self.actions = []
def add_action(self, action):
"""
"""
self.actions.append(action)
def clear_actions(self):
self.actions = []
def create_pause(self, duraton=0):
pass
| 1 | 14,870 | we should probably deprecate (and display a warning) the misspelled keyword arg here rather than removing it... and then add the new one. This changes a public API and will break any code that is currently using the misspelled version. | SeleniumHQ-selenium | rb |
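The review above asks for a deprecation cycle rather than an outright rename of the misspelled keyword. A minimal sketch of that approach (illustrative only — the warning text and the **kwargs handling are assumptions, not Selenium's actual implementation):

import warnings

class InputDevice(object):
    def create_pause(self, duration=0, **kwargs):
        # Accept the old misspelled keyword for one release, but warn about it.
        if "duraton" in kwargs:
            warnings.warn(
                "'duraton' is deprecated, use 'duration' instead",
                DeprecationWarning,
                stacklevel=2,
            )
            duration = kwargs.pop("duraton")
        if kwargs:
            raise TypeError(
                "create_pause() got unexpected keyword arguments: %r" % sorted(kwargs)
            )
        # The method body in this record is just a no-op.
        pass

With this sketch, existing callers passing create_pause(duraton=5) keep working but see a DeprecationWarning, while create_pause(duration=5) becomes the supported spelling.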
@@ -2608,3 +2608,16 @@ class GroupByTest(ReusedSQLTestCase, TestUtils):
self.assertRaises(
ValueError, lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group("mammal")
)
+
+ def test_median(self):
+ kdf = ks.DataFrame(
+ {
+ "a": [1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0],
+ "b": [2.0, 3.0, 1.0, 4.0, 6.0, 9.0, 8.0, 10.0, 7.0, 5.0],
+ "c": [3.0, 5.0, 2.0, 5.0, 1.0, 2.0, 6.0, 4.0, 3.0, 6.0],
+ },
+ columns=["a", "b", "c"],
+ index=[7, 2, 4, 1, 3, 4, 9, 10, 5, 6],
+ )
+ with self.assertRaisesRegex(ValueError, "accuracy must be an integer; however"):
+ kdf.groupby("a").median(accuracy="a") | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from distutils.version import LooseVersion
from itertools import product
import numpy as np
import pandas as pd
from databricks import koalas as ks
from databricks.koalas.config import option_context
from databricks.koalas.exceptions import PandasNotImplementedError, DataError
from databricks.koalas.missing.groupby import (
MissingPandasLikeDataFrameGroupBy,
MissingPandasLikeSeriesGroupBy,
)
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
from databricks.koalas.groupby import is_multi_agg_with_relabel
class GroupByTest(ReusedSQLTestCase, TestUtils):
def test_groupby_simple(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 6, 4, 4, 6, 4, 3, 7],
"b": [4, 2, 7, 3, 3, 1, 1, 1, 2],
"c": [4, 2, 7, 3, None, 1, 1, 1, 2],
"d": list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ks.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("a").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index).sum()),
sort(pdf.groupby("a", as_index=as_index).sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index).b.sum()),
sort(pdf.groupby("a", as_index=as_index).b.sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)["b"].sum()),
sort(pdf.groupby("a", as_index=as_index)["b"].sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)[["b", "c"]].sum()),
sort(pdf.groupby("a", as_index=as_index)[["b", "c"]].sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)[[]].sum()),
sort(pdf.groupby("a", as_index=as_index)[[]].sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)["c"].sum()),
sort(pdf.groupby("a", as_index=as_index)["c"].sum()),
)
self.assert_eq(kdf.groupby("a").a.sum().sort_index(), pdf.groupby("a").a.sum().sort_index())
self.assert_eq(
kdf.groupby("a")["a"].sum().sort_index(), pdf.groupby("a")["a"].sum().sort_index()
)
self.assert_eq(
kdf.groupby("a")[["a"]].sum().sort_index(), pdf.groupby("a")[["a"]].sum().sort_index()
)
self.assert_eq(
kdf.groupby("a")[["a", "c"]].sum().sort_index(),
pdf.groupby("a")[["a", "c"]].sum().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b).sum().sort_index(), pdf.a.groupby(pdf.b).sum().sort_index()
)
for axis in [0, "index"]:
self.assert_eq(
kdf.groupby("a", axis=axis).a.sum().sort_index(),
pdf.groupby("a", axis=axis).a.sum().sort_index(),
)
self.assert_eq(
kdf.groupby("a", axis=axis)["a"].sum().sort_index(),
pdf.groupby("a", axis=axis)["a"].sum().sort_index(),
)
self.assert_eq(
kdf.groupby("a", axis=axis)[["a"]].sum().sort_index(),
pdf.groupby("a", axis=axis)[["a"]].sum().sort_index(),
)
self.assert_eq(
kdf.groupby("a", axis=axis)[["a", "c"]].sum().sort_index(),
pdf.groupby("a", axis=axis)[["a", "c"]].sum().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b, axis=axis).sum().sort_index(),
pdf.a.groupby(pdf.b, axis=axis).sum().sort_index(),
)
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False).a)
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False)["a"])
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False)[["a"]])
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False)[["a", "c"]])
self.assertRaises(KeyError, lambda: kdf.groupby("z", as_index=False)[["a", "c"]])
self.assertRaises(KeyError, lambda: kdf.groupby(["z"], as_index=False)[["a", "c"]])
self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.b, as_index=False))
self.assertRaises(NotImplementedError, lambda: kdf.groupby("a", axis=1))
self.assertRaises(NotImplementedError, lambda: kdf.groupby("a", axis="columns"))
self.assertRaises(ValueError, lambda: kdf.groupby("a", "b"))
self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.a, kdf.b))
# we can't use column name/names as a parameter `by` for `SeriesGroupBy`.
self.assertRaises(KeyError, lambda: kdf.a.groupby(by="a"))
self.assertRaises(KeyError, lambda: kdf.a.groupby(by=["a", "b"]))
self.assertRaises(KeyError, lambda: kdf.a.groupby(by=("a", "b")))
# we can't use DataFrame as a parameter `by` for `DataFrameGroupBy`/`SeriesGroupBy`.
self.assertRaises(ValueError, lambda: kdf.groupby(kdf))
self.assertRaises(ValueError, lambda: kdf.a.groupby(kdf))
self.assertRaises(ValueError, lambda: kdf.a.groupby((kdf,)))
# non-string names
pdf = pd.DataFrame(
{
10: [1, 2, 6, 4, 4, 6, 4, 3, 7],
20: [4, 2, 7, 3, 3, 1, 1, 1, 2],
30: [4, 2, 7, 3, None, 1, 1, 1, 2],
40: list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ks.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(10).reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby(10, as_index=as_index).sum()),
sort(pdf.groupby(10, as_index=as_index).sum()),
)
self.assert_eq(
sort(kdf.groupby(10, as_index=as_index)[20].sum()),
sort(pdf.groupby(10, as_index=as_index)[20].sum()),
)
self.assert_eq(
sort(kdf.groupby(10, as_index=as_index)[[20, 30]].sum()),
sort(pdf.groupby(10, as_index=as_index)[[20, 30]].sum()),
)
def test_groupby_multiindex_columns(self):
pdf = pd.DataFrame(
{
(10, "a"): [1, 2, 6, 4, 4, 6, 4, 3, 7],
(10, "b"): [4, 2, 7, 3, 3, 1, 1, 1, 2],
(20, "c"): [4, 2, 7, 3, None, 1, 1, 1, 2],
(30, "d"): list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby((10, "a")).sum().sort_index(), pdf.groupby((10, "a")).sum().sort_index()
)
self.assert_eq(
kdf.groupby((10, "a"), as_index=False)
.sum()
.sort_values((10, "a"))
.reset_index(drop=True),
pdf.groupby((10, "a"), as_index=False)
.sum()
.sort_values((10, "a"))
.reset_index(drop=True),
)
self.assert_eq(
kdf.groupby((10, "a"))[[(20, "c")]].sum().sort_index(),
pdf.groupby((10, "a"))[[(20, "c")]].sum().sort_index(),
)
# TODO: a pandas bug?
# expected = pdf.groupby((10, "a"))[(20, "c")].sum().sort_index()
expected = pd.Series(
[4.0, 2.0, 1.0, 4.0, 8.0, 2.0],
name=(20, "c"),
index=pd.Index([1, 2, 3, 4, 6, 7], name=(10, "a")),
)
self.assert_eq(kdf.groupby((10, "a"))[(20, "c")].sum().sort_index(), expected)
if LooseVersion(pd.__version__) < LooseVersion("1.1.3"):
self.assert_eq(
kdf[(20, "c")].groupby(kdf[(10, "a")]).sum().sort_index(),
pdf[(20, "c")].groupby(pdf[(10, "a")]).sum().sort_index(),
)
else:
# seems like a pandas bug introduced in pandas 1.1.3.
self.assert_eq(kdf[(20, "c")].groupby(kdf[(10, "a")]).sum().sort_index(), expected)
def test_split_apply_combine_on_series(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 6, 4, 4, 6, 4, 3, 7],
"b": [4, 2, 7, 3, 3, 1, 1, 1, 2],
"c": [4, 2, 7, 3, None, 1, 1, 1, 2],
"d": list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ks.from_pandas(pdf)
funcs = [
((True, False), ["sum", "min", "max", "count", "first", "last"]),
((True, True), ["mean"]),
((False, False), ["var", "std"]),
]
funcs = [(check_exact, almost, f) for (check_exact, almost), fs in funcs for f in fs]
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
for check_exact, almost, func in funcs:
for kkey, pkey in [("b", "b"), (kdf.b, pdf.b)]:
with self.subTest(as_index=as_index, func=func, key=pkey):
if as_index is True or func != "std":
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
else:
# seems like a pandas' bug for as_index=False and func == "std"?
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),
sort(pdf.groupby(pkey, as_index=True).a.std().reset_index()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),
sort(pdf.groupby(pkey, as_index=True).std().reset_index()),
check_exact=check_exact,
almost=almost,
)
for kkey, pkey in [(kdf.b + 1, pdf.b + 1), (kdf.copy().b, pdf.copy().b)]:
with self.subTest(as_index=as_index, func=func, key=pkey):
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
for check_exact, almost, func in funcs:
for i in [0, 4, 7]:
with self.subTest(as_index=as_index, func=func, i=i):
self.assert_eq(
sort(getattr(kdf.groupby(kdf.b > i, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kdf.b > i, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
for check_exact, almost, func in funcs:
for kkey, pkey in [
(kdf.b, pdf.b),
(kdf.b + 1, pdf.b + 1),
(kdf.copy().b, pdf.copy().b),
(kdf.b.rename(), pdf.b.rename()),
]:
with self.subTest(func=func, key=pkey):
self.assert_eq(
getattr(kdf.a.groupby(kkey), func)().sort_index(),
getattr(pdf.a.groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr((kdf.a + 1).groupby(kkey), func)().sort_index(),
getattr((pdf.a + 1).groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr((kdf.b + 1).groupby(kkey), func)().sort_index(),
getattr((pdf.b + 1).groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr(kdf.a.rename().groupby(kkey), func)().sort_index(),
getattr(pdf.a.rename().groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
def test_aggregate(self):
pdf = pd.DataFrame(
{"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [0.362, 0.227, 1.267, -0.562]}
)
kdf = ks.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
for kkey, pkey in [("A", "A"), (kdf.A, pdf.A)]:
with self.subTest(as_index=as_index, key=pkey):
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg("sum")),
sort(pdf.groupby(pkey, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf.groupby(pkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
kdf.groupby(kkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf.groupby(pkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
if as_index:
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=as_index).agg(["sum"])),
)
else:
# seems like a pandas' bug for as_index=False and func_or_funcs is list?
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=True).agg(["sum"]).reset_index()),
)
for kkey, pkey in [(kdf.A + 1, pdf.A + 1), (kdf.copy().A, pdf.copy().A)]:
with self.subTest(as_index=as_index, key=pkey):
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg("sum")),
sort(pdf.groupby(pkey, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf.groupby(pkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
kdf.groupby(kkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf.groupby(pkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=as_index).agg(["sum"])),
)
expected_error_message = (
r"aggs must be a dict mapping from column name to aggregate functions "
r"\(string or list of strings\)."
)
with self.assertRaisesRegex(ValueError, expected_error_message):
kdf.groupby("A", as_index=as_index).agg(0)
# multi-index columns
columns = pd.MultiIndex.from_tuples([(10, "A"), (10, "B"), (20, "C")])
pdf.columns = columns
kdf.columns = columns
for as_index in [True, False]:
stats_kdf = kdf.groupby((10, "A"), as_index=as_index).agg(
{(10, "B"): "min", (20, "C"): "sum"}
)
stats_pdf = pdf.groupby((10, "A"), as_index=as_index).agg(
{(10, "B"): "min", (20, "C"): "sum"}
)
self.assert_eq(
stats_kdf.sort_values(by=[(10, "B"), (20, "C")]).reset_index(drop=True),
stats_pdf.sort_values(by=[(10, "B"), (20, "C")]).reset_index(drop=True),
)
stats_kdf = kdf.groupby((10, "A")).agg({(10, "B"): ["min", "max"], (20, "C"): "sum"})
stats_pdf = pdf.groupby((10, "A")).agg({(10, "B"): ["min", "max"], (20, "C"): "sum"})
self.assert_eq(
stats_kdf.sort_values(
by=[(10, "B", "min"), (10, "B", "max"), (20, "C", "sum")]
).reset_index(drop=True),
stats_pdf.sort_values(
by=[(10, "B", "min"), (10, "B", "max"), (20, "C", "sum")]
).reset_index(drop=True),
)
# non-string names
pdf.columns = [10, 20, 30]
kdf.columns = [10, 20, 30]
for as_index in [True, False]:
stats_kdf = kdf.groupby(10, as_index=as_index).agg({20: "min", 30: "sum"})
stats_pdf = pdf.groupby(10, as_index=as_index).agg({20: "min", 30: "sum"})
self.assert_eq(
stats_kdf.sort_values(by=[20, 30]).reset_index(drop=True),
stats_pdf.sort_values(by=[20, 30]).reset_index(drop=True),
)
stats_kdf = kdf.groupby(10).agg({20: ["min", "max"], 30: "sum"})
stats_pdf = pdf.groupby(10).agg({20: ["min", "max"], 30: "sum"})
self.assert_eq(
stats_kdf.sort_values(by=[(20, "min"), (20, "max"), (30, "sum")]).reset_index(
drop=True
),
stats_pdf.sort_values(by=[(20, "min"), (20, "max"), (30, "sum")]).reset_index(
drop=True
),
)
def test_aggregate_func_str_list(self):
        # this is a test for cases where only a string or a list is assigned
pdf = pd.DataFrame(
{
"kind": ["cat", "dog", "cat", "dog"],
"height": [9.1, 6.0, 9.5, 34.0],
"weight": [7.9, 7.5, 9.9, 198.0],
}
)
kdf = ks.from_pandas(pdf)
agg_funcs = ["max", "min", ["min", "max"]]
for aggfunc in agg_funcs:
            # Since the order of rows in a Koalas groupby might be different,
            # sort on the index to ensure they have the same output
sorted_agg_kdf = kdf.groupby("kind").agg(aggfunc).sort_index()
sorted_agg_pdf = pdf.groupby("kind").agg(aggfunc).sort_index()
self.assert_eq(sorted_agg_kdf, sorted_agg_pdf)
# test on multi index column case
pdf = pd.DataFrame(
{"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [0.362, 0.227, 1.267, -0.562]}
)
kdf = ks.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
kdf.columns = columns
for aggfunc in agg_funcs:
sorted_agg_kdf = kdf.groupby(("X", "A")).agg(aggfunc).sort_index()
sorted_agg_pdf = pdf.groupby(("X", "A")).agg(aggfunc).sort_index()
self.assert_eq(sorted_agg_kdf, sorted_agg_pdf)
@unittest.skipIf(pd.__version__ < "0.25.0", "not supported before pandas 0.25.0")
def test_aggregate_relabel(self):
# this is to test named aggregation in groupby
pdf = pd.DataFrame({"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]})
kdf = ks.from_pandas(pdf)
# different agg column, same function
agg_pdf = pdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index()
agg_kdf = kdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index()
self.assert_eq(agg_pdf, agg_kdf)
# same agg column, different functions
agg_pdf = pdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index()
agg_kdf = kdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index()
self.assert_eq(agg_pdf, agg_kdf)
# test on NamedAgg
agg_pdf = (
pdf.groupby("group").agg(b_max=pd.NamedAgg(column="B", aggfunc="max")).sort_index()
)
agg_kdf = (
kdf.groupby("group").agg(b_max=ks.NamedAgg(column="B", aggfunc="max")).sort_index()
)
self.assert_eq(agg_kdf, agg_pdf)
# test on NamedAgg multi columns aggregation
agg_pdf = (
pdf.groupby("group")
.agg(
b_max=pd.NamedAgg(column="B", aggfunc="max"),
b_min=pd.NamedAgg(column="B", aggfunc="min"),
)
.sort_index()
)
agg_kdf = (
kdf.groupby("group")
.agg(
b_max=ks.NamedAgg(column="B", aggfunc="max"),
b_min=ks.NamedAgg(column="B", aggfunc="min"),
)
.sort_index()
)
self.assert_eq(agg_kdf, agg_pdf)
def test_dropna(self):
pdf = pd.DataFrame(
{"A": [None, 1, None, 1, 2], "B": [1, 2, 3, None, None], "C": [4, 5, 6, 7, None]}
)
kdf = ks.from_pandas(pdf)
# pd.DataFrame.groupby with dropna parameter is implemented since pandas 1.1.0
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=dropna).std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna).std()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=dropna).B.std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna).B.std()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=dropna)["B"].std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna)["B"].std()),
)
self.assert_eq(
sort(
kdf.groupby("A", as_index=as_index, dropna=dropna).agg(
{"B": "min", "C": "std"}
)
),
sort(
pdf.groupby("A", as_index=as_index, dropna=dropna).agg(
{"B": "min", "C": "std"}
)
),
)
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(["A", "B"]).reset_index(drop=True)
self.assert_eq(
sort(
kdf.groupby(["A", "B"], as_index=as_index, dropna=dropna).agg(
{"C": ["min", "std"]}
)
),
sort(
pdf.groupby(["A", "B"], as_index=as_index, dropna=dropna).agg(
{"C": ["min", "std"]}
)
),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
kdf.columns = columns
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(("X", "A")).reset_index(drop=True)
sorted_stats_kdf = sort(
kdf.groupby(("X", "A"), as_index=as_index, dropna=dropna).agg(
{("X", "B"): "min", ("Y", "C"): "std"}
)
)
sorted_stats_pdf = sort(
pdf.groupby(("X", "A"), as_index=as_index, dropna=dropna).agg(
{("X", "B"): "min", ("Y", "C"): "std"}
)
)
self.assert_eq(sorted_stats_kdf, sorted_stats_pdf)
else:
# Testing dropna=True (pandas default behavior)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=True)["B"].min()),
sort(pdf.groupby("A", as_index=as_index)["B"].min()),
)
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(["A", "B"]).reset_index(drop=True)
self.assert_eq(
sort(
kdf.groupby(["A", "B"], as_index=as_index, dropna=True).agg(
{"C": ["min", "std"]}
)
),
sort(pdf.groupby(["A", "B"], as_index=as_index).agg({"C": ["min", "std"]})),
almost=True,
)
# Testing dropna=False
index = pd.Index([1.0, 2.0, np.nan], name="A")
expected = pd.Series([2.0, np.nan, 1.0], index=index, name="B")
result = kdf.groupby("A", as_index=True, dropna=False)["B"].min().sort_index()
self.assert_eq(expected, result)
expected = pd.DataFrame({"A": [1.0, 2.0, np.nan], "B": [2.0, np.nan, 1.0]})
result = (
kdf.groupby("A", as_index=False, dropna=False)["B"]
.min()
.sort_values("A")
.reset_index(drop=True)
)
self.assert_eq(expected, result)
index = pd.MultiIndex.from_tuples(
[(1.0, 2.0), (1.0, None), (2.0, None), (None, 1.0), (None, 3.0)], names=["A", "B"]
)
expected = pd.DataFrame(
{
("C", "min"): [5.0, 7.0, np.nan, 4.0, 6.0],
("C", "std"): [np.nan, np.nan, np.nan, np.nan, np.nan],
},
index=index,
)
result = (
kdf.groupby(["A", "B"], as_index=True, dropna=False)
.agg({"C": ["min", "std"]})
.sort_index()
)
self.assert_eq(expected, result)
expected = pd.DataFrame(
{
("A", ""): [1.0, 1.0, 2.0, np.nan, np.nan],
("B", ""): [2.0, np.nan, np.nan, 1.0, 3.0],
("C", "min"): [5.0, 7.0, np.nan, 4.0, 6.0],
("C", "std"): [np.nan, np.nan, np.nan, np.nan, np.nan],
}
)
result = (
kdf.groupby(["A", "B"], as_index=False, dropna=False)
.agg({"C": ["min", "std"]})
.sort_values(["A", "B"])
.reset_index(drop=True)
)
self.assert_eq(expected, result)
def test_describe(self):
        # supported for numeric types; string types are not supported yet
datas = []
datas.append({"a": [1, 1, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
datas.append({"a": [-1, -1, -3], "b": [-4, -5, -6], "c": [-7, -8, -9]})
datas.append({"a": [0, 0, 0], "b": [0, 0, 0], "c": [0, 8, 0]})
        # it is okay to use a string type column as a group key
datas.append({"a": ["a", "a", "c"], "b": [4, 5, 6], "c": [7, 8, 9]})
percentiles = [0.25, 0.5, 0.75]
formatted_percentiles = ["25%", "50%", "75%"]
non_percentile_stats = ["count", "mean", "std", "min", "max"]
for data in datas:
pdf = pd.DataFrame(data)
kdf = ks.from_pandas(pdf)
describe_pdf = pdf.groupby("a").describe().sort_index()
describe_kdf = kdf.groupby("a").describe().sort_index()
            # since the results of the percentile columns differ slightly from pandas,
# we should check them separately: non-percentile columns & percentile columns
# 1. Check that non-percentile columns are equal.
agg_cols = [col.name for col in kdf.groupby("a")._agg_columns]
self.assert_eq(
describe_kdf.drop(list(product(agg_cols, formatted_percentiles))),
describe_pdf.drop(columns=formatted_percentiles, level=1),
check_exact=False,
)
# 2. Check that percentile columns are equal.
# The interpolation argument is yet to be implemented in Koalas.
quantile_pdf = pdf.groupby("a").quantile(percentiles, interpolation="nearest")
quantile_pdf = quantile_pdf.unstack(level=1).astype(float)
self.assert_eq(
describe_kdf.drop(list(product(agg_cols, non_percentile_stats))),
quantile_pdf.rename(columns="{:.0%}".format, level=1),
)
        # string types are not supported yet
datas = []
datas.append({"a": ["a", "a", "c"], "b": ["d", "e", "f"], "c": ["g", "h", "i"]})
datas.append({"a": ["a", "a", "c"], "b": [4, 0, 1], "c": ["g", "h", "i"]})
for data in datas:
pdf = pd.DataFrame(data)
kdf = ks.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: kdf.groupby("a").describe().sort_index())
# multi-index columns
pdf = pd.DataFrame({("x", "a"): [1, 1, 3], ("x", "b"): [4, 5, 6], ("y", "c"): [7, 8, 9]})
kdf = ks.from_pandas(pdf)
describe_pdf = pdf.groupby(("x", "a")).describe().sort_index()
describe_kdf = kdf.groupby(("x", "a")).describe().sort_index()
# 1. Check that non-percentile columns are equal.
agg_column_labels = [col._column_label for col in kdf.groupby(("x", "a"))._agg_columns]
self.assert_eq(
describe_kdf.drop(
[
tuple(list(label) + [s])
for label, s in product(agg_column_labels, formatted_percentiles)
]
),
describe_pdf.drop(columns=formatted_percentiles, level=2),
check_exact=False,
)
# 2. Check that percentile columns are equal.
# The interpolation argument is yet to be implemented in Koalas.
quantile_pdf = pdf.groupby(("x", "a")).quantile(percentiles, interpolation="nearest")
quantile_pdf = quantile_pdf.unstack(level=1).astype(float)
self.assert_eq(
describe_kdf.drop(
[
tuple(list(label) + [s])
for label, s in product(agg_column_labels, non_percentile_stats)
]
),
quantile_pdf.rename(columns="{:.0%}".format, level=2),
)
def test_aggregate_relabel_multiindex(self):
pdf = pd.DataFrame({"A": [0, 1, 2, 3], "B": [5, 6, 7, 8], "group": ["a", "a", "b", "b"]})
pdf.columns = pd.MultiIndex.from_tuples([("y", "A"), ("y", "B"), ("x", "group")])
kdf = ks.from_pandas(pdf)
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [1, 3]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = pdf.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")).sort_index()
agg_kdf = kdf.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")).sort_index()
self.assert_eq(agg_pdf, agg_kdf)
# same column, different methods
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [1, 3], "a_min": [0, 2]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = (
pdf.groupby(("x", "group"))
.agg(a_max=(("y", "A"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
agg_kdf = (
kdf.groupby(("x", "group"))
.agg(a_max=(("y", "A"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
self.assert_eq(agg_pdf, agg_kdf)
# different column, different methods
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [6, 8], "a_min": [0, 2]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = (
pdf.groupby(("x", "group"))
.agg(a_max=(("y", "B"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
agg_kdf = (
kdf.groupby(("x", "group"))
.agg(a_max=(("y", "B"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
self.assert_eq(agg_pdf, agg_kdf)
def test_all_any(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
"B": [True, True, True, False, False, False, None, True, None, False],
}
)
kdf = ks.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).all()),
sort(pdf.groupby("A", as_index=as_index).all()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).any()),
sort(pdf.groupby("A", as_index=as_index).any()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).all()).B,
sort(pdf.groupby("A", as_index=as_index).all()).B,
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).any()).B,
sort(pdf.groupby("A", as_index=as_index).any()).B,
)
self.assert_eq(
kdf.B.groupby(kdf.A).all().sort_index(), pdf.B.groupby(pdf.A).all().sort_index()
)
self.assert_eq(
kdf.B.groupby(kdf.A).any().sort_index(), pdf.B.groupby(pdf.A).any().sort_index()
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")])
pdf.columns = columns
kdf.columns = columns
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(("X", "A")).reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby(("X", "A"), as_index=as_index).all()),
sort(pdf.groupby(("X", "A"), as_index=as_index).all()),
)
self.assert_eq(
sort(kdf.groupby(("X", "A"), as_index=as_index).any()),
sort(pdf.groupby(("X", "A"), as_index=as_index).any()),
)
def test_raises(self):
kdf = ks.DataFrame(
{"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
# test raises with incorrect key
self.assertRaises(ValueError, lambda: kdf.groupby([]))
self.assertRaises(KeyError, lambda: kdf.groupby("x"))
self.assertRaises(KeyError, lambda: kdf.groupby(["a", "x"]))
self.assertRaises(KeyError, lambda: kdf.groupby("a")["x"])
self.assertRaises(KeyError, lambda: kdf.groupby("a")["b", "x"])
self.assertRaises(KeyError, lambda: kdf.groupby("a")[["b", "x"]])
def test_nunique(self):
pdf = pd.DataFrame(
{"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], "b": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("a").agg({"b": "nunique"}).sort_index(),
pdf.groupby("a").agg({"b": "nunique"}).sort_index(),
)
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
expected = ks.DataFrame({"b": [2, 2]}, index=pd.Index([0, 1], name="a"))
self.assert_eq(kdf.groupby("a").nunique().sort_index(), expected)
self.assert_eq(
kdf.groupby("a").nunique(dropna=False).sort_index(), expected,
)
else:
self.assert_eq(
kdf.groupby("a").nunique().sort_index(), pdf.groupby("a").nunique().sort_index()
)
self.assert_eq(
kdf.groupby("a").nunique(dropna=False).sort_index(),
pdf.groupby("a").nunique(dropna=False).sort_index(),
)
self.assert_eq(
kdf.groupby("a")["b"].nunique().sort_index(),
pdf.groupby("a")["b"].nunique().sort_index(),
)
self.assert_eq(
kdf.groupby("a")["b"].nunique(dropna=False).sort_index(),
pdf.groupby("a")["b"].nunique(dropna=False).sort_index(),
)
nunique_kdf = kdf.groupby("a", as_index=False).agg({"b": "nunique"})
nunique_pdf = pdf.groupby("a", as_index=False).agg({"b": "nunique"})
self.assert_eq(
nunique_kdf.sort_values(["a", "b"]).reset_index(drop=True),
nunique_pdf.sort_values(["a", "b"]).reset_index(drop=True),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
kdf.columns = columns
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
expected = ks.DataFrame({("y", "b"): [2, 2]}, index=pd.Index([0, 1], name=("x", "a")))
self.assert_eq(
kdf.groupby(("x", "a")).nunique().sort_index(), expected,
)
self.assert_eq(
kdf.groupby(("x", "a")).nunique(dropna=False).sort_index(), expected,
)
else:
self.assert_eq(
kdf.groupby(("x", "a")).nunique().sort_index(),
pdf.groupby(("x", "a")).nunique().sort_index(),
)
self.assert_eq(
kdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
pdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
)
def test_unique(self):
for pdf in [
pd.DataFrame(
{"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], "b": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}
),
pd.DataFrame(
{
"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
"b": ["w", "w", "w", "x", "x", "y", "y", "z", "z", "z"],
}
),
]:
with self.subTest(pdf=pdf):
kdf = ks.from_pandas(pdf)
actual = kdf.groupby("a")["b"].unique().sort_index().to_pandas()
expect = pdf.groupby("a")["b"].unique().sort_index()
self.assert_eq(len(actual), len(expect))
for act, exp in zip(actual, expect):
self.assertTrue(sorted(act) == sorted(exp))
def test_value_counts(self):
pdf = pd.DataFrame({"A": [1, 2, 2, 3, 3, 3], "B": [1, 1, 2, 3, 3, 3]}, columns=["A", "B"])
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("A")["B"].value_counts().sort_index(),
pdf.groupby("A")["B"].value_counts().sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].value_counts(sort=True, ascending=False).sort_index(),
pdf.groupby("A")["B"].value_counts(sort=True, ascending=False).sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].value_counts(sort=True, ascending=True).sort_index(),
pdf.groupby("A")["B"].value_counts(sort=True, ascending=True).sort_index(),
)
self.assert_eq(
kdf.B.rename().groupby(kdf.A).value_counts().sort_index(),
pdf.B.rename().groupby(pdf.A).value_counts().sort_index(),
)
self.assert_eq(
kdf.B.groupby(kdf.A.rename()).value_counts().sort_index(),
pdf.B.groupby(pdf.A.rename()).value_counts().sort_index(),
)
self.assert_eq(
kdf.B.rename().groupby(kdf.A.rename()).value_counts().sort_index(),
pdf.B.rename().groupby(pdf.A.rename()).value_counts().sort_index(),
)
def test_size(self):
pdf = pd.DataFrame({"A": [1, 2, 2, 3, 3, 3], "B": [1, 1, 2, 3, 3, 3]})
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.groupby("A").size().sort_index(), pdf.groupby("A").size().sort_index())
self.assert_eq(
kdf.groupby("A")["B"].size().sort_index(), pdf.groupby("A")["B"].size().sort_index()
)
self.assert_eq(
kdf.groupby("A")[["B"]].size().sort_index(), pdf.groupby("A")[["B"]].size().sort_index()
)
self.assert_eq(
kdf.groupby(["A", "B"]).size().sort_index(), pdf.groupby(["A", "B"]).size().sort_index()
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("X", "A")).size().sort_index(), pdf.groupby(("X", "A")).size().sort_index()
)
self.assert_eq(
kdf.groupby([("X", "A"), ("Y", "B")]).size().sort_index(),
pdf.groupby([("X", "A"), ("Y", "B")]).size().sort_index(),
)
def test_diff(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.groupby("b").diff().sort_index(), pdf.groupby("b").diff().sort_index())
self.assert_eq(
kdf.groupby(["a", "b"]).diff().sort_index(), pdf.groupby(["a", "b"]).diff().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])["a"].diff().sort_index(), pdf.groupby(["b"])["a"].diff().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])[["a", "b"]].diff().sort_index(),
pdf.groupby(["b"])[["a", "b"]].diff().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).diff().sort_index(), pdf.groupby(pdf.b // 5).diff().sort_index()
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].diff().sort_index(),
pdf.groupby(pdf.b // 5)["a"].diff().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).diff().sort_index(), pdf.groupby(("x", "b")).diff().sort_index()
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).diff().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).diff().sort_index(),
)
def test_rank(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.groupby("b").rank().sort_index(), pdf.groupby("b").rank().sort_index())
self.assert_eq(
kdf.groupby(["a", "b"]).rank().sort_index(), pdf.groupby(["a", "b"]).rank().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])["a"].rank().sort_index(), pdf.groupby(["b"])["a"].rank().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].rank().sort_index(),
pdf.groupby(["b"])[["a", "c"]].rank().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).rank().sort_index(), pdf.groupby(pdf.b // 5).rank().sort_index()
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].rank().sort_index(),
pdf.groupby(pdf.b // 5)["a"].rank().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).rank().sort_index(), pdf.groupby(("x", "b")).rank().sort_index()
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).rank().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).rank().sort_index(),
)
def test_cumcount(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
for ascending in [True, False]:
self.assert_eq(
kdf.groupby("b").cumcount(ascending=ascending).sort_index(),
pdf.groupby("b").cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).cumcount(ascending=ascending).sort_index(),
pdf.groupby(["a", "b"]).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(["b"])["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cumcount(ascending=ascending).sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cumcount(ascending=ascending).sort_index(),
pdf.groupby(pdf.b // 5).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(pdf.b // 5)["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby("b").cumcount(ascending=ascending).sum(),
pdf.groupby("b").cumcount(ascending=ascending).sum(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cumcount(ascending=ascending).sort_index(),
pdf.a.rename().groupby(pdf.b).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cumcount(ascending=ascending).sort_index(),
pdf.a.groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cumcount(ascending=ascending).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
for ascending in [True, False]:
self.assert_eq(
kdf.groupby(("x", "b")).cumcount(ascending=ascending).sort_index(),
pdf.groupby(("x", "b")).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cumcount(ascending=ascending).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumcount(ascending=ascending).sort_index(),
)
def test_cummin(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cummin().sort_index(), pdf.groupby("b").cummin().sort_index()
)
self.assert_eq(
kdf.groupby(["a", "b"]).cummin().sort_index(),
pdf.groupby(["a", "b"]).cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cummin().sort_index(),
pdf.groupby(["b"])["a"].cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cummin().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cummin().sort_index(),
pdf.groupby(pdf.b // 5).cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cummin().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cummin().sort_index(),
)
self.assert_eq(
kdf.groupby("b").cummin().sum().sort_index(),
pdf.groupby("b").cummin().sum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cummin().sort_index(),
pdf.a.rename().groupby(pdf.b).cummin().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cummin().sort_index(),
pdf.a.groupby(pdf.b.rename()).cummin().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cummin().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cummin().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cummin().sort_index(),
pdf.groupby(("x", "b")).cummin().sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cummin().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cummin().sort_index(),
)
kdf = ks.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cummin())
kdf = ks.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cummin())
def test_cummax(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cummax().sort_index(), pdf.groupby("b").cummax().sort_index()
)
self.assert_eq(
kdf.groupby(["a", "b"]).cummax().sort_index(),
pdf.groupby(["a", "b"]).cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cummax().sort_index(),
pdf.groupby(["b"])["a"].cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cummax().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cummax().sort_index(),
pdf.groupby(pdf.b // 5).cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cummax().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cummax().sort_index(),
)
self.assert_eq(
kdf.groupby("b").cummax().sum().sort_index(),
pdf.groupby("b").cummax().sum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cummax().sort_index(),
pdf.a.rename().groupby(pdf.b).cummax().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cummax().sort_index(),
pdf.a.groupby(pdf.b.rename()).cummax().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cummax().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cummax().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cummax().sort_index(),
pdf.groupby(("x", "b")).cummax().sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cummax().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cummax().sort_index(),
)
kdf = ks.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cummax())
kdf = ks.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cummax())
def test_cumsum(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cumsum().sort_index(), pdf.groupby("b").cumsum().sort_index()
)
self.assert_eq(
kdf.groupby(["a", "b"]).cumsum().sort_index(),
pdf.groupby(["a", "b"]).cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cumsum().sort_index(),
pdf.groupby(["b"])["a"].cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cumsum().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cumsum().sort_index(),
pdf.groupby(pdf.b // 5).cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cumsum().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby("b").cumsum().sum().sort_index(),
pdf.groupby("b").cumsum().sum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cumsum().sort_index(),
pdf.a.rename().groupby(pdf.b).cumsum().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cumsum().sort_index(),
pdf.a.groupby(pdf.b.rename()).cumsum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cumsum().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumsum().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cumsum().sort_index(),
pdf.groupby(("x", "b")).cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cumsum().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumsum().sort_index(),
)
kdf = ks.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cumsum())
kdf = ks.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cumsum())
def test_cumprod(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cumprod().sort_index(),
pdf.groupby("b").cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby(["a", "b"]).cumprod().sort_index(),
pdf.groupby(["a", "b"]).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby(["b"])["a"].cumprod().sort_index(),
pdf.groupby(["b"])["a"].cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cumprod().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby(kdf.b // 3).cumprod().sort_index(),
pdf.groupby(pdf.b // 3).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby(kdf.b // 3)["a"].cumprod().sort_index(),
pdf.groupby(pdf.b // 3)["a"].cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby("b").cumprod().sum().sort_index(),
pdf.groupby("b").cumprod().sum().sort_index(),
almost=True,
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cumprod().sort_index(),
pdf.a.rename().groupby(pdf.b).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cumprod().sort_index(),
pdf.a.groupby(pdf.b.rename()).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cumprod().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumprod().sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cumprod().sort_index(),
pdf.groupby(("x", "b")).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cumprod().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumprod().sort_index(),
almost=True,
)
kdf = ks.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cumprod())
kdf = ks.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cumprod())
def test_nsmallest(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"c": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"d": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
},
index=np.random.rand(9 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby(["a"])["b"].nsmallest(1).sort_values(),
pdf.groupby(["a"])["b"].nsmallest(1).sort_values(),
)
self.assert_eq(
kdf.groupby(["a"])["b"].nsmallest(2).sort_index(),
pdf.groupby(["a"])["b"].nsmallest(2).sort_index(),
)
self.assert_eq(
(kdf.b * 10).groupby(kdf.a).nsmallest(2).sort_index(),
(pdf.b * 10).groupby(pdf.a).nsmallest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a).nsmallest(2).sort_index(),
pdf.b.rename().groupby(pdf.a).nsmallest(2).sort_index(),
)
self.assert_eq(
kdf.b.groupby(kdf.a.rename()).nsmallest(2).sort_index(),
pdf.b.groupby(pdf.a.rename()).nsmallest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a.rename()).nsmallest(2).sort_index(),
pdf.b.rename().groupby(pdf.a.rename()).nsmallest(2).sort_index(),
)
with self.assertRaisesRegex(ValueError, "nsmallest do not support multi-index now"):
kdf.set_index(["a", "b"]).groupby(["c"])["d"].nsmallest(1)
def test_nlargest(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"c": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"d": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
},
index=np.random.rand(9 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby(["a"])["b"].nlargest(1).sort_values(),
pdf.groupby(["a"])["b"].nlargest(1).sort_values(),
)
self.assert_eq(
kdf.groupby(["a"])["b"].nlargest(2).sort_index(),
pdf.groupby(["a"])["b"].nlargest(2).sort_index(),
)
self.assert_eq(
(kdf.b * 10).groupby(kdf.a).nlargest(2).sort_index(),
(pdf.b * 10).groupby(pdf.a).nlargest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a).nlargest(2).sort_index(),
pdf.b.rename().groupby(pdf.a).nlargest(2).sort_index(),
)
self.assert_eq(
kdf.b.groupby(kdf.a.rename()).nlargest(2).sort_index(),
pdf.b.groupby(pdf.a.rename()).nlargest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a.rename()).nlargest(2).sort_index(),
pdf.b.rename().groupby(pdf.a.rename()).nlargest(2).sort_index(),
)
with self.assertRaisesRegex(ValueError, "nlargest do not support multi-index now"):
kdf.set_index(["a", "b"]).groupby(["c"])["d"].nlargest(1)
def test_fillna(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("A").fillna(0).sort_index(), pdf.groupby("A").fillna(0).sort_index()
)
self.assert_eq(
kdf.groupby("A")["C"].fillna(0).sort_index(),
pdf.groupby("A")["C"].fillna(0).sort_index(),
)
self.assert_eq(
kdf.groupby("A")[["C"]].fillna(0).sort_index(),
pdf.groupby("A")[["C"]].fillna(0).sort_index(),
)
self.assert_eq(
kdf.groupby("A").fillna(method="bfill").sort_index(),
pdf.groupby("A").fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")["C"].fillna(method="bfill").sort_index(),
pdf.groupby("A")["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby("A")[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby("A").fillna(method="ffill").sort_index(),
pdf.groupby("A").fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")["C"].fillna(method="ffill").sort_index(),
pdf.groupby("A")["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby("A")[["C"]].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5).fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5).fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)["C"].fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5)["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5)[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5).fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5).fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)["C"].fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5)["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5)[["C"]].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.C.rename().groupby(kdf.A).fillna(0).sort_index(),
pdf.C.rename().groupby(pdf.A).fillna(0).sort_index(),
)
self.assert_eq(
kdf.C.groupby(kdf.A.rename()).fillna(0).sort_index(),
pdf.C.groupby(pdf.A.rename()).fillna(0).sort_index(),
)
self.assert_eq(
kdf.C.rename().groupby(kdf.A.rename()).fillna(0).sort_index(),
pdf.C.rename().groupby(pdf.A.rename()).fillna(0).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("X", "A")).fillna(0).sort_index(),
pdf.groupby(("X", "A")).fillna(0).sort_index(),
)
self.assert_eq(
kdf.groupby(("X", "A")).fillna(method="bfill").sort_index(),
pdf.groupby(("X", "A")).fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(("X", "A")).fillna(method="ffill").sort_index(),
pdf.groupby(("X", "A")).fillna(method="ffill").sort_index(),
)
def test_ffill(self):
idx = np.random.rand(4 * 3)
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
},
index=idx,
)
kdf = ks.from_pandas(pdf)
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby("A").ffill().sort_index(),
pdf.groupby("A").ffill().sort_index().drop("A", 1),
)
self.assert_eq(
kdf.groupby("A")[["B"]].ffill().sort_index(),
pdf.groupby("A")[["B"]].ffill().sort_index().drop("A", 1),
)
else:
self.assert_eq(
kdf.groupby("A").ffill().sort_index(), pdf.groupby("A").ffill().sort_index()
)
self.assert_eq(
kdf.groupby("A")[["B"]].ffill().sort_index(),
pdf.groupby("A")[["B"]].ffill().sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].ffill().sort_index(), pdf.groupby("A")["B"].ffill().sort_index()
)
self.assert_eq(kdf.groupby("A")["B"].ffill()[idx[6]], pdf.groupby("A")["B"].ffill()[idx[6]])
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
kdf.columns = columns
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby(("X", "A")).ffill().sort_index(),
pdf.groupby(("X", "A")).ffill().sort_index().drop(("X", "A"), 1),
)
else:
self.assert_eq(
kdf.groupby(("X", "A")).ffill().sort_index(),
pdf.groupby(("X", "A")).ffill().sort_index(),
)
def test_bfill(self):
idx = np.random.rand(4 * 3)
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
},
index=idx,
)
kdf = ks.from_pandas(pdf)
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby("A").bfill().sort_index(),
pdf.groupby("A").bfill().sort_index().drop("A", 1),
)
self.assert_eq(
kdf.groupby("A")[["B"]].bfill().sort_index(),
pdf.groupby("A")[["B"]].bfill().sort_index().drop("A", 1),
)
else:
self.assert_eq(
kdf.groupby("A").bfill().sort_index(), pdf.groupby("A").bfill().sort_index()
)
self.assert_eq(
kdf.groupby("A")[["B"]].bfill().sort_index(),
pdf.groupby("A")[["B"]].bfill().sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].bfill().sort_index(), pdf.groupby("A")["B"].bfill().sort_index(),
)
self.assert_eq(kdf.groupby("A")["B"].bfill()[idx[6]], pdf.groupby("A")["B"].bfill()[idx[6]])
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
kdf.columns = columns
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby(("X", "A")).bfill().sort_index(),
pdf.groupby(("X", "A")).bfill().sort_index().drop(("X", "A"), 1),
)
else:
self.assert_eq(
kdf.groupby(("X", "A")).bfill().sort_index(),
pdf.groupby(("X", "A")).bfill().sort_index(),
)
@unittest.skipIf(pd.__version__ < "0.24.0", "not supported before pandas 0.24.0")
def test_shift(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 2, 2, 3, 3] * 3,
"b": [1, 1, 2, 2, 3, 4] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.groupby("a").shift().sort_index(), pdf.groupby("a").shift().sort_index())
# TODO: seems like a pandas' bug when fill_value is not None?
# self.assert_eq(kdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index(),
# pdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index())
self.assert_eq(
kdf.groupby(["b"])["a"].shift().sort_index(),
pdf.groupby(["b"])["a"].shift().sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"])["c"].shift().sort_index(),
pdf.groupby(["a", "b"])["c"].shift().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).shift().sort_index(),
pdf.groupby(pdf.b // 5).shift().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].shift().sort_index(),
pdf.groupby(pdf.b // 5)["a"].shift().sort_index(),
)
        # TODO: known pandas' bug when fill_value is not None for pandas>=1.0.0
# https://github.com/pandas-dev/pandas/issues/31971#issue-565171762
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].shift(periods=-1, fill_value=0).sort_index(),
pdf.groupby(["b"])[["a", "c"]].shift(periods=-1, fill_value=0).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).shift().sort_index(),
pdf.a.rename().groupby(pdf.b).shift().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).shift().sort_index(),
pdf.a.groupby(pdf.b.rename()).shift().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).shift().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).shift().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "a")).shift().sort_index(),
pdf.groupby(("x", "a")).shift().sort_index(),
)
# TODO: seems like a pandas' bug when fill_value is not None?
# self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,
# fill_value=0).sort_index(),
# pdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,
# fill_value=0).sort_index())
def test_apply(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").apply(lambda x: x + x.min()).sort_index(),
pdf.groupby("b").apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby("b").apply(len).sort_index(), pdf.groupby("b").apply(len).sort_index(),
)
self.assert_eq(
kdf.groupby("b")["a"].apply(lambda x, y, z: x + x.min() + y * z, 10, z=20).sort_index(),
pdf.groupby("b")["a"].apply(lambda x, y, z: x + x.min() + y * z, 10, z=20).sort_index(),
)
self.assert_eq(
kdf.groupby("b")[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).apply(lambda x, y, z: x + x.min() + y + z, 1, z=2).sort_index(),
pdf.groupby(["a", "b"]).apply(lambda x, y, z: x + x.min() + y + z, 1, z=2).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["c"].apply(lambda x: 1).sort_index(),
pdf.groupby(["b"])["c"].apply(lambda x: 1).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["c"].apply(len).sort_index(),
pdf.groupby(["b"])["c"].apply(len).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)["a"].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)[["a"]].apply(len).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].apply(len).sort_index(),
almost=True,
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).apply(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
pdf.a.groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
)
with self.assertRaisesRegex(TypeError, "int object is not callable"):
kdf.groupby("b").apply(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).apply(lambda x: 1).sort_index(),
pdf.groupby(("x", "b")).apply(lambda x: 1).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(("x", "b")).apply(len).sort_index(),
pdf.groupby(("x", "b")).apply(len).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).apply(len).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).apply(len).sort_index(),
)
def test_apply_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_apply()
def test_apply_negative(self):
def func(_) -> ks.Series[int]:
return pd.Series([1])
with self.assertRaisesRegex(TypeError, "Series as a return type hint at frame groupby"):
ks.range(10).groupby("id").apply(func)
def test_apply_with_new_dataframe(self):
pdf = pd.DataFrame(
{"timestamp": [0.0, 0.5, 1.0, 0.0, 0.5], "car_id": ["A", "A", "A", "B", "B"]}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
pdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
)
self.assert_eq(
kdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
pdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
)
# dataframe with 1000+ records
pdf = pd.DataFrame(
{
"timestamp": [0.0, 0.5, 1.0, 0.0, 0.5] * 300,
"car_id": ["A", "A", "A", "B", "B"] * 300,
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
pdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
)
self.assert_eq(
kdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
pdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
)
def test_apply_with_new_dataframe_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_apply_with_new_dataframe()
def test_apply_key_handling(self):
pdf = pd.DataFrame(
{"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "v": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("d").apply(sum).sort_index(), pdf.groupby("d").apply(sum).sort_index()
)
with ks.option_context("compute.shortcut_limit", 1):
self.assert_eq(
kdf.groupby("d").apply(sum).sort_index(), pdf.groupby("d").apply(sum).sort_index()
)
def test_apply_with_side_effect(self):
pdf = pd.DataFrame(
{"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "v": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}
)
kdf = ks.from_pandas(pdf)
acc = ks.utils.default_session().sparkContext.accumulator(0)
def sum_with_acc_frame(x) -> ks.DataFrame[np.float64, np.float64]:
nonlocal acc
acc += 1
return np.sum(x)
actual = kdf.groupby("d").apply(sum_with_acc_frame).sort_index()
actual.columns = ["d", "v"]
self.assert_eq(actual, pdf.groupby("d").apply(sum).sort_index().reset_index(drop=True))
self.assert_eq(acc.value, 2)
def sum_with_acc_series(x) -> np.float64:
nonlocal acc
acc += 1
return np.sum(x)
self.assert_eq(
kdf.groupby("d")["v"].apply(sum_with_acc_series).sort_index(),
pdf.groupby("d")["v"].apply(sum).sort_index().reset_index(drop=True),
)
self.assert_eq(acc.value, 4)
def test_transform(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b").transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby("b")["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby("b")[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["a", "b"]).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["c"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["b"])["c"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).transform(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
pdf.a.groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(("x", "b")).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).transform(lambda x: x + x.min()).sort_index(),
)
def test_transform_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_transform()
def test_filter(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby("b").filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby("b")["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby("b")["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby("b")[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby("b")[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(["a", "b"]).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf["b"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf["b"] // 5)["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5)["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf["b"] // 5)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.rename().groupby(pdf.b).filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
)
with self.assertRaisesRegex(TypeError, "int object is not callable"):
kdf.groupby("b").filter(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).filter(lambda x: any(x[("x", "a")] == 2)).sort_index(),
pdf.groupby(("x", "b")).filter(lambda x: any(x[("x", "a")] == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")])
.filter(lambda x: any(x[("x", "a")] == 2))
.sort_index(),
pdf.groupby([("x", "a"), ("x", "b")])
.filter(lambda x: any(x[("x", "a")] == 2))
.sort_index(),
)
def test_idxmax(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2, 3] * 3, "b": [1, 2, 3, 4, 5] * 3, "c": [5, 4, 3, 2, 1] * 3}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
pdf.groupby(["a"]).idxmax().sort_index(), kdf.groupby(["a"]).idxmax().sort_index()
)
self.assert_eq(
pdf.groupby(["a"]).idxmax(skipna=False).sort_index(),
kdf.groupby(["a"]).idxmax(skipna=False).sort_index(),
)
self.assert_eq(
pdf.groupby(["a"])["b"].idxmax().sort_index(),
kdf.groupby(["a"])["b"].idxmax().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).idxmax().sort_index(),
kdf.b.rename().groupby(kdf.a).idxmax().sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).idxmax().sort_index(),
kdf.b.groupby(kdf.a.rename()).idxmax().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).idxmax().sort_index(),
kdf.b.rename().groupby(kdf.a.rename()).idxmax().sort_index(),
)
with self.assertRaisesRegex(ValueError, "idxmax only support one-level index now"):
kdf.set_index(["a", "b"]).groupby(["c"]).idxmax()
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).idxmax().sort_index(),
kdf.groupby(("x", "a")).idxmax().sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).idxmax(skipna=False).sort_index(),
kdf.groupby(("x", "a")).idxmax(skipna=False).sort_index(),
)
def test_idxmin(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2, 3] * 3, "b": [1, 2, 3, 4, 5] * 3, "c": [5, 4, 3, 2, 1] * 3}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
pdf.groupby(["a"]).idxmin().sort_index(), kdf.groupby(["a"]).idxmin().sort_index()
)
self.assert_eq(
pdf.groupby(["a"]).idxmin(skipna=False).sort_index(),
kdf.groupby(["a"]).idxmin(skipna=False).sort_index(),
)
self.assert_eq(
pdf.groupby(["a"])["b"].idxmin().sort_index(),
kdf.groupby(["a"])["b"].idxmin().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).idxmin().sort_index(),
kdf.b.rename().groupby(kdf.a).idxmin().sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).idxmin().sort_index(),
kdf.b.groupby(kdf.a.rename()).idxmin().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).idxmin().sort_index(),
kdf.b.rename().groupby(kdf.a.rename()).idxmin().sort_index(),
)
with self.assertRaisesRegex(ValueError, "idxmin only support one-level index now"):
kdf.set_index(["a", "b"]).groupby(["c"]).idxmin()
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).idxmin().sort_index(),
kdf.groupby(("x", "a")).idxmin().sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).idxmin(skipna=False).sort_index(),
kdf.groupby(("x", "a")).idxmin(skipna=False).sort_index(),
)
def test_head(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf.groupby("a").head(2).sort_index(), kdf.groupby("a").head(2).sort_index())
self.assert_eq(
pdf.groupby("a").head(-2).sort_index(), kdf.groupby("a").head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(100000).sort_index(), kdf.groupby("a").head(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), kdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(-2).sort_index(), kdf.groupby("a")["b"].head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(100000).sort_index(),
kdf.groupby("a")["b"].head(100000).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(2).sort_index(),
kdf.groupby("a")[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(-2).sort_index(),
kdf.groupby("a")[["b"]].head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(100000).sort_index(),
kdf.groupby("a")[["b"]].head(100000).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2).head(2).sort_index(),
kdf.groupby(kdf.a // 2).head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)["b"].head(2).sort_index(),
kdf.groupby(kdf.a // 2)["b"].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)[["b"]].head(2).sort_index(),
kdf.groupby(kdf.a // 2)[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).head(2).sort_index(),
kdf.b.rename().groupby(kdf.a).head(2).sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).head(2).sort_index(),
kdf.b.groupby(kdf.a.rename()).head(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).head(2).sort_index(),
kdf.b.rename().groupby(kdf.a.rename()).head(2).sort_index(),
)
# multi-index
midx = pd.MultiIndex(
[["x", "y"], ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]],
[[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],
)
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6],
},
columns=["a", "b", "c"],
index=midx,
)
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf.groupby("a").head(2).sort_index(), kdf.groupby("a").head(2).sort_index())
self.assert_eq(
pdf.groupby("a").head(-2).sort_index(), kdf.groupby("a").head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(100000).sort_index(), kdf.groupby("a").head(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), kdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(-2).sort_index(), kdf.groupby("a")["b"].head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(100000).sort_index(),
kdf.groupby("a")["b"].head(100000).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).head(2).sort_index(),
kdf.groupby(("x", "a")).head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).head(-2).sort_index(),
kdf.groupby(("x", "a")).head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).head(100000).sort_index(),
kdf.groupby(("x", "a")).head(100000).sort_index(),
)
def test_missing(self):
kdf = ks.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# DataFrameGroupBy functions
missing_functions = inspect.getmembers(
MissingPandasLikeDataFrameGroupBy, inspect.isfunction
)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.groupby("a"), name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.groupby("a"), name)()
# SeriesGroupBy functions
missing_functions = inspect.getmembers(MissingPandasLikeSeriesGroupBy, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.groupby(kdf.a), name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.a.groupby(kdf.a), name)()
# DataFrameGroupBy properties
missing_properties = inspect.getmembers(
MissingPandasLikeDataFrameGroupBy, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.groupby("a"), name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.groupby("a"), name)
# SeriesGroupBy properties
missing_properties = inspect.getmembers(
MissingPandasLikeSeriesGroupBy, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.groupby(kdf.a), name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.a.groupby(kdf.a), name)
@staticmethod
def test_is_multi_agg_with_relabel():
assert is_multi_agg_with_relabel(a="max") is False
assert is_multi_agg_with_relabel(a_min=("a", "max"), a_max=("a", "min")) is True
def test_get_group(self):
pdf = pd.DataFrame(
[
("falcon", "bird", 389.0),
("parrot", "bird", 24.0),
("lion", "mammal", 80.5),
("monkey", "mammal", np.nan),
],
columns=["name", "class", "max_speed"],
index=[0, 2, 3, 1],
)
pdf.columns.name = "Koalas"
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("class").get_group("bird"), pdf.groupby("class").get_group("bird"),
)
self.assert_eq(
kdf.groupby("class")["name"].get_group("mammal"),
pdf.groupby("class")["name"].get_group("mammal"),
)
self.assert_eq(
kdf.groupby("class")[["name"]].get_group("mammal"),
pdf.groupby("class")[["name"]].get_group("mammal"),
)
self.assert_eq(
kdf.groupby(["class", "name"]).get_group(("mammal", "lion")),
pdf.groupby(["class", "name"]).get_group(("mammal", "lion")),
)
self.assert_eq(
kdf.groupby(["class", "name"])["max_speed"].get_group(("mammal", "lion")),
pdf.groupby(["class", "name"])["max_speed"].get_group(("mammal", "lion")),
)
self.assert_eq(
kdf.groupby(["class", "name"])[["max_speed"]].get_group(("mammal", "lion")),
pdf.groupby(["class", "name"])[["max_speed"]].get_group(("mammal", "lion")),
)
self.assert_eq(
(kdf.max_speed + 1).groupby(kdf["class"]).get_group("mammal"),
(pdf.max_speed + 1).groupby(pdf["class"]).get_group("mammal"),
)
self.assert_eq(
kdf.groupby("max_speed").get_group(80.5), pdf.groupby("max_speed").get_group(80.5),
)
self.assertRaises(KeyError, lambda: kdf.groupby("class").get_group("fish"))
self.assertRaises(TypeError, lambda: kdf.groupby("class").get_group(["bird", "mammal"]))
self.assertRaises(KeyError, lambda: kdf.groupby("class")["name"].get_group("fish"))
self.assertRaises(
TypeError, lambda: kdf.groupby("class")["name"].get_group(["bird", "mammal"])
)
self.assertRaises(
KeyError, lambda: kdf.groupby(["class", "name"]).get_group(("lion", "mammal"))
)
self.assertRaises(ValueError, lambda: kdf.groupby(["class", "name"]).get_group(("lion",)))
self.assertRaises(ValueError, lambda: kdf.groupby(["class", "name"]).get_group(("mammal",)))
self.assertRaises(ValueError, lambda: kdf.groupby(["class", "name"]).get_group("mammal"))
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("A", "name"), ("B", "class"), ("C", "max_speed")])
pdf.columns.names = ["Hello", "Koalas"]
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby(("B", "class")).get_group("bird"),
pdf.groupby(("B", "class")).get_group("bird"),
)
self.assert_eq(
kdf.groupby(("B", "class"))[[("A", "name")]].get_group("mammal"),
pdf.groupby(("B", "class"))[[("A", "name")]].get_group("mammal"),
)
self.assert_eq(
kdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal", "lion")),
pdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal", "lion")),
)
self.assert_eq(
kdf.groupby([("B", "class"), ("A", "name")])[[("C", "max_speed")]].get_group(
("mammal", "lion")
),
pdf.groupby([("B", "class"), ("A", "name")])[[("C", "max_speed")]].get_group(
("mammal", "lion")
),
)
self.assert_eq(
(kdf[("C", "max_speed")] + 1).groupby(kdf[("B", "class")]).get_group("mammal"),
(pdf[("C", "max_speed")] + 1).groupby(pdf[("B", "class")]).get_group("mammal"),
)
self.assert_eq(
kdf.groupby(("C", "max_speed")).get_group(80.5),
pdf.groupby(("C", "max_speed")).get_group(80.5),
)
self.assertRaises(KeyError, lambda: kdf.groupby(("B", "class")).get_group("fish"))
self.assertRaises(
TypeError, lambda: kdf.groupby(("B", "class")).get_group(["bird", "mammal"])
)
self.assertRaises(
KeyError, lambda: kdf.groupby(("B", "class"))[("A", "name")].get_group("fish")
)
self.assertRaises(
KeyError,
lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group(("lion", "mammal")),
)
self.assertRaises(
ValueError, lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group(("lion",)),
)
self.assertRaises(
ValueError, lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal",))
)
self.assertRaises(
ValueError, lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group("mammal")
)
| 1 | 17,258 | I'm wondering if a positive test case is needed here? If so, we could compare the resulting DataFrame with a pre-created Koalas DataFrame. | databricks-koalas | py
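The review comment above suggests a positive get_group check against a pre-built expected frame. Below is a minimal, self-contained sketch of that idea; the concrete fixture values are invented for illustration, the `databricks.koalas` import path is assumed, and inside the real test class one would use `self.assert_eq` rather than the manual comparison shown here.

import pandas as pd
import databricks.koalas as ks

# Hand-built fixture mirroring the shape of the test data (illustrative values only).
pdf = pd.DataFrame(
    [("falcon", "bird", 389.0), ("parrot", "bird", 24.0), ("lion", "mammal", 80.5)],
    columns=["name", "class", "max_speed"],
)
kdf = ks.from_pandas(pdf)

# Positive case: the Koalas result should equal the pre-computed pandas group.
expected = pdf[pdf["class"] == "bird"]
result = kdf.groupby("class").get_group("bird").to_pandas()
print(result.equals(expected))  # expected to print True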
@@ -328,9 +328,9 @@ public interface Value<T> extends Iterable<T> {
}
/**
- * Checks, this {@code Value} is empty, i.e. if the underlying value is absent.
+ * Checks whether this {@code Value} is empty, i.e. if the underlying value is absent.
*
- * @return false, if no underlying value is present, true otherwise.
+ * @return true, if no underlying value is present, false otherwise.
*/
boolean isEmpty();
| 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang;
import javaslang.collection.*;
import javaslang.collection.HashMap;
import javaslang.collection.HashSet;
import javaslang.collection.Iterator;
import javaslang.collection.List;
import javaslang.collection.Map;
import javaslang.collection.PriorityQueue;
import javaslang.collection.Queue;
import javaslang.collection.Set;
import javaslang.collection.SortedSet;
import javaslang.collection.Stack;
import javaslang.collection.TreeSet;
import javaslang.collection.Vector;
import javaslang.control.*;
import java.io.*;
import java.util.*;
import java.util.function.*;
import java.util.stream.StreamSupport;
/**
* Functional programming is all about values and transformation of values using functions. The {@code Value}
* type reflects the values in a functional setting. It can be seen as the result of a partial function application.
* Hence the result may be undefined. If a value is undefined, we say it is empty.
* <p>
* How the empty state is interpreted depends on the context, i.e. it may be <em>undefined</em>, <em>failed</em>,
* <em>no elements</em>, etc.
* <p>
* <p>
* Basic operations:
* <p>
* <ul>
* <li>{@link #get()}</li>
* <li>{@link #getOption()}</li>
* <li>{@link #getOrElse(Object)}</li>
* <li>{@link #getOrElse(Supplier)}</li>
* <li>{@link #getOrElseThrow(Supplier)}</li>
* <li>{@link #isEmpty()}</li>
* <li>{@link #isSingleValued()}</li>
* <li>{@link #map(Function)}</li>
* <li>{@link #stringPrefix()}</li>
* </ul>
* <p>
* Equality checks:
* <p>
* <ul>
* <li>{@link #corresponds(Iterable, BiPredicate)}</li>
* <li>{@link #eq(Object)}</li>
* </ul>
* <p>
* Iterable extensions:
* <p>
* <ul>
* <li>{@link #contains(Object)}</li>
* <li>{@link #exists(Predicate)}</li>
* <li>{@link #forAll(Predicate)}</li>
* <li>{@link #forEach(Consumer)}</li>
* <li>{@link #iterator()}</li>
* </ul>
* <p>
* Side-effects:
* <p>
* <ul>
* <li>{@link #out(PrintStream)}</li>
* <li>{@link #out(PrintWriter)}</li>
* <li>{@link #peek(Consumer)}</li>
* <li>{@link #stderr()}</li>
* <li>{@link #stdout()}</li>
* </ul>
* <p>
* Type conversion:
* <p>
* <ul>
* <li>{@link #toArray()}</li>
* <li>{@link #toCharSeq()}</li>
* <li>{@link #toJavaArray()}</li>
* <li>{@link #toJavaArray(Class)}</li>
* <li>{@link #toJavaCollection(Supplier)}</li>
* <li>{@link #toJavaList()}</li>
* <li>{@link #toJavaList(Supplier)}</li>
* <li>{@link #toJavaMap(Function)}</li>
* <li>{@link #toJavaMap(Supplier, Function)}</li>
* <li>{@link #toJavaOptional()}</li>
* <li>{@link #toJavaSet()}</li>
* <li>{@link #toJavaSet(Supplier)}</li>
* <li>{@link #toJavaStream()}</li>
* <li>{@link #toLeft(Object)}</li>
* <li>{@link #toLeft(Supplier)}</li>
* <li>{@link #toList()}</li>
* <li>{@link #toMap(Function)}</li>
* <li>{@link #toOption()}</li>
* <li>{@link #toQueue()}</li>
* <li>{@link #toRight(Object)}</li>
* <li>{@link #toRight(Supplier)}</li>
* <li>{@link #toSet()}</li>
* <li>{@link #toSortedSet(Comparator)}</li>
* <li>{@link #toSortedQueue(Comparator)}</li>
* <li>{@link #toStack()}</li>
* <li>{@link #toStream()}</li>
* <li>{@link #toString()}</li>
* <li>{@link #toTree()}</li>
* <li>{@link #toTry()}</li>
* <li>{@link #toTry(Supplier)}</li>
* <li>{@link #toVector()}</li>
* </ul>
* <p>
* <strong>Please note:</strong> flatMap signatures are manifold and have to be declared by subclasses of Value.
*
* @param <T> The type of the wrapped value.
* @author Daniel Dietrich
* @since 2.0.0
*/
public interface Value<T> extends Iterable<T> {
/**
* Narrows a widened {@code Value<? extends T>} to {@code Value<T>}
* by performing a type safe-cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param value A {@code Value}.
* @param <T> Component type of the {@code Value}.
* @return the given {@code value} instance as narrowed type {@code Value<T>}.
*/
@SuppressWarnings("unchecked")
static <T> Value<T> narrow(Value<? extends T> value) {
return (Value<T>) value;
}
/**
* Shortcut for {@code exists(e -> Objects.equals(e, element))}, tests if the given {@code element} is contained.
*
* @param element An Object of type A, may be null.
* @return true, if element is contained, false otherwise.
*/
default boolean contains(T element) {
return exists(e -> Objects.equals(e, element));
}
/**
* Tests whether every element of this iterable relates to the corresponding element of another iterable by
* satisfying a test predicate.
*
* @param <U> Component type of that iterable
* @param that the other iterable
* @param predicate the test predicate, which relates elements from both iterables
* @return {@code true} if both iterables have the same length and {@code predicate(x, y)}
* is {@code true} for all corresponding elements {@code x} of this iterable and {@code y} of {@code that},
* otherwise {@code false}.
*/
default <U> boolean corresponds(Iterable<U> that, BiPredicate<? super T, ? super U> predicate) {
final java.util.Iterator<T> it1 = iterator();
final java.util.Iterator<U> it2 = that.iterator();
while (it1.hasNext() && it2.hasNext()) {
if (!predicate.test(it1.next(), it2.next())) {
return false;
}
}
return !it1.hasNext() && !it2.hasNext();
}
/**
* A <em>smoothing</em> replacement for {@code equals}. It is similar to Scala's {@code ==} but better in the way
* that it is not limited to collection types, e.g. {@code Some(1) eq List(1)}, {@code None eq Failure(x)} etc.
* <p>
* In a nutshell: eq checks <strong>congruence of structures</strong> and <strong>equality of contained values</strong>.
* <p>
* Example:
* <p>
* <pre><code>
* // ((1, 2), ((3))) => structure: (()(())) values: 1, 2, 3
* final Value<?> i1 = List.of(List.of(1, 2), Arrays.asList(List.of(3)));
* final Value<?> i2 = Queue.of(Stream.of(1, 2), List.of(Lazy.of(() -> 3)));
* assertThat(i1.eq(i2)).isTrue();
* </code></pre>
* <p>
* Semantics:
* <p>
* <pre><code>
* o == this : true
* o instanceof Value : iterable elements are eq, non-iterable elements equals, for all (o1, o2) in (this, o)
* o instanceof Iterable : this eq Iterator.of((Iterable<?>) o);
* otherwise : false
* </code></pre>
*
* @param o An object
* @return true, if this equals o according to the rules defined above, otherwise false.
*/
default boolean eq(Object o) {
if (o == this) {
return true;
} else if (o instanceof Value) {
final Value<?> that = (Value<?>) o;
return this.iterator().corresponds(that.iterator(), (o1, o2) -> {
if (o1 instanceof Value) {
return ((Value<?>) o1).eq(o2);
} else if (o2 instanceof Value) {
return ((Value<?>) o2).eq(o1);
} else {
return Objects.equals(o1, o2);
}
});
} else if (o instanceof Iterable) {
final Value<?> that = Iterator.ofAll((Iterable<?>) o);
return this.eq(that);
} else {
return false;
}
}
/**
* Checks, if an element exists such that the predicate holds.
*
* @param predicate A Predicate
* @return true, if predicate holds for one or more elements, false otherwise
* @throws NullPointerException if {@code predicate} is null
*/
default boolean exists(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
for (T t : this) {
if (predicate.test(t)) {
return true;
}
}
return false;
}
/**
* Checks, if the given predicate holds for all elements.
*
* @param predicate A Predicate
* @return true, if the predicate holds for all elements, false otherwise
* @throws NullPointerException if {@code predicate} is null
*/
default boolean forAll(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return !exists(predicate.negate());
}
/**
* Performs an action on each element.
*
* @param action A {@code Consumer}
* @throws NullPointerException if {@code action} is null
*/
@Override
default void forEach(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
for (T t : this) {
action.accept(t);
}
}
/**
* Gets the underlying value or throws if no value is present.
*
* @return the underlying value
* @throws java.util.NoSuchElementException if no value is defined
*/
T get();
/**
* Gets the underlying value as Option.
*
* @return Some(value) if a value is present, None otherwise
*/
default Option<T> getOption() {
return isEmpty() ? Option.none() : Option.some(get());
}
/**
* Returns the underlying value if present, otherwise {@code other}.
*
* @param other An alternative value.
* @return A value of type {@code T}
*/
default T getOrElse(T other) {
return isEmpty() ? other : get();
}
/**
* Returns the underlying value if present, otherwise {@code other}.
*
* @param supplier An alternative value supplier.
* @return A value of type {@code T}
* @throws NullPointerException if supplier is null
*/
default T getOrElse(Supplier<? extends T> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return isEmpty() ? supplier.get() : get();
}
/**
* Returns the underlying value if present, otherwise throws {@code supplier.get()}.
*
* @param <X> a Throwable type
* @param supplier An exception supplier.
* @return A value of type {@code T}.
* @throws NullPointerException if supplier is null
* @throws X if no value is present
*/
default <X extends Throwable> T getOrElseThrow(Supplier<X> supplier) throws X {
Objects.requireNonNull(supplier, "supplier is null");
if (isEmpty()) {
throw supplier.get();
} else {
return get();
}
}
/**
* Returns the underlying value if present, otherwise returns the result of {@code Try.of(supplier).get()}.
*
* @param supplier An alternative value supplier.
* @return A value of type {@code T}.
* @throws NullPointerException if supplier is null
* @throws Try.NonFatalException containing the original exception if this Value was empty and the Try failed.
*/
default T getOrElseTry(Try.CheckedSupplier<? extends T> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return isEmpty() ? Try.of(supplier).get() : get();
}
/**
* Checks, this {@code Value} is empty, i.e. if the underlying value is absent.
*
* @return false, if no underlying value is present, true otherwise.
*/
boolean isEmpty();
/**
* States whether this is a single-valued type.
*
* @return {@code true} if this is single-valued, {@code false} otherwise.
*/
boolean isSingleValued();
/**
* Maps the underlying value to a different component type.
*
* @param mapper A mapper
* @param <U> The new component type
* @return A new value
*/
<U> Value<U> map(Function<? super T, ? extends U> mapper);
/**
* Performs the given {@code action} on the first element if this is an <em>eager</em> implementation.
* Performs the given {@code action} on all elements (the first immediately, successive deferred),
* if this is a <em>lazy</em> implementation.
*
* @param action The action that will be performed on the element(s).
* @return this instance
*/
Value<T> peek(Consumer<? super T> action);
/**
* Returns the name of this Value type, which is used by toString().
*
* @return This type name.
*/
String stringPrefix();
// -- output
/**
* Sends the string representations of this to the {@link PrintStream}.
* If this value consists of multiple elements, each element is displayed in a new line.
*
* @param out The PrintStream to write to
* @throws IllegalStateException if {@code PrintStream.checkError()} is true after writing to stream.
*/
default void out(PrintStream out) {
for (T t : this) {
out.println(String.valueOf(t));
if (out.checkError()) {
throw new IllegalStateException("Error writing to PrintStream");
}
}
}
/**
* Sends the string representations of this to the {@link PrintWriter}.
* If this value consists of multiple elements, each element is displayed in a new line.
*
* @param writer The PrintWriter to write to
* @throws IllegalStateException if {@code PrintWriter.checkError()} is true after writing to writer.
*/
default void out(PrintWriter writer) {
for (T t : this) {
writer.println(String.valueOf(t));
if (writer.checkError()) {
throw new IllegalStateException("Error writing to PrintWriter");
}
}
}
/**
* Sends the string representations of this to the standard error stream {@linkplain System#err}.
* If this value consists of multiple elements, each element is displayed in a new line.
*
* @throws IllegalStateException if {@code PrintStream.checkError()} is true after writing to stderr.
*/
default void stderr() {
out(System.err);
}
/**
* Sends the string representations of this to the standard output stream {@linkplain System#out}.
* If this value consists of multiple elements, each element is displayed in a new line.
*
* @throws IllegalStateException if {@code PrintStream.checkError()} is true after writing to stdout.
*/
default void stdout() {
out(System.out);
}
// -- Adjusted return types of Iterable
/**
* Returns a rich {@code javaslang.collection.Iterator}.
*
* @return A new Iterator
*/
@Override
Iterator<T> iterator();
// -- conversion methods
/**
* Converts this to a {@link Array}.
*
* @return A new {@link Array}.
*/
default Array<T> toArray() {
return ValueModule.toTraversable(this, Array.empty(), Array::of, Array::ofAll);
}
/**
* Converts this to a {@link CharSeq}.
*
* @return A new {@link CharSeq}.
*/
default CharSeq toCharSeq() {
return CharSeq.of(toString());
}
/**
* Converts this to a specific {@link java.util.Collection}.
*
* @param factory A {@code java.util.Collection} factory
* @param <C> a sub-type of {@code java.util.Collection}
* @return a new {@code java.util.Collection} of type {@code C}
*/
default <C extends java.util.Collection<T>> C toJavaCollection(Supplier<C> factory) {
return ValueModule.toJavaCollection(this, factory.get());
}
/**
* Converts this to an untyped Java array.
*
* @return A new Java array.
*/
default Object[] toJavaArray() {
return toJavaList().toArray();
}
/**
* Converts this to a typed Java array.
*
* @param componentType Component type of the array
* @return A new Java array.
* @throws NullPointerException if componentType is null
*/
@SuppressWarnings("unchecked")
default T[] toJavaArray(Class<T> componentType) {
Objects.requireNonNull(componentType, "componentType is null");
final java.util.List<T> list = toJavaList();
return list.toArray((T[]) java.lang.reflect.Array.newInstance(componentType, list.size()));
}
/**
* Converts this to an {@link java.util.List}.
*
* @return A new {@link java.util.ArrayList}.
*/
default java.util.List<T> toJavaList() {
return ValueModule.toJavaCollection(this, new ArrayList<>());
}
/**
* Converts this to a specific {@link java.util.List}.
*
* @param factory A {@code java.util.List} factory
* @param <LIST> a sub-type of {@code java.util.List}
* @return a new {@code java.util.List} of type {@code LIST}
*/
default <LIST extends java.util.List<T>> LIST toJavaList(Supplier<LIST> factory) {
return ValueModule.toJavaCollection(this, factory.get());
}
/**
* Converts this to a {@link java.util.Map}.
*
* @param f A function that maps an element to a key/value pair represented by Tuple2
* @param <K> The key type
* @param <V> The value type
* @return A new {@link java.util.HashMap}.
*/
default <K, V> java.util.Map<K, V> toJavaMap(Function<? super T, ? extends Tuple2<? extends K, ? extends V>> f) {
return toJavaMap(java.util.HashMap::new, f);
}
/**
* Converts this to a specific {@link java.util.Map}.
*
* @param factory A {@code java.util.Map} factory
* @param f A function that maps an element to a key/value pair represented by Tuple2
* @param <K> The key type
* @param <V> The value type
* @param <MAP> a sub-type of {@code java.util.Map}
* @return a new {@code java.util.Map} of type {@code MAP}
*/
default <K, V, MAP extends java.util.Map<K, V>> MAP toJavaMap(Supplier<MAP> factory, Function<? super T, ? extends Tuple2<? extends K, ? extends V>> f) {
Objects.requireNonNull(f, "f is null");
final MAP map = factory.get();
if (!isEmpty()) {
if (isSingleValued()) {
final Tuple2<? extends K, ? extends V> entry = f.apply(get());
map.put(entry._1, entry._2);
} else {
for (T a : this) {
final Tuple2<? extends K, ? extends V> entry = f.apply(a);
map.put(entry._1, entry._2);
}
}
}
return map;
}
/**
* Converts this to an {@link java.util.Optional}.
*
* @return A new {@link java.util.Optional}.
*/
default Optional<T> toJavaOptional() {
return isEmpty() ? Optional.empty() : Optional.ofNullable(get());
}
/**
* Converts this to a {@link java.util.Set}.
*
* @return A new {@link java.util.HashSet}.
*/
default java.util.Set<T> toJavaSet() {
return ValueModule.toJavaCollection(this, new java.util.HashSet<>());
}
/**
* Converts this to a specific {@link java.util.Set}.
*
* @param factory A {@code java.util.Set} factory
* @param <SET> a sub-type of {@code java.util.Set}
* @return a new {@code java.util.Set} of type {@code SET}
*/
default <SET extends java.util.Set<T>> SET toJavaSet(Supplier<SET> factory) {
return ValueModule.toJavaCollection(this, factory.get());
}
/**
* Converts this to a {@link java.util.stream.Stream}.
*
* @return A new {@link java.util.stream.Stream}.
*/
default java.util.stream.Stream<T> toJavaStream() {
return StreamSupport.stream(spliterator(), false);
}
/**
* Converts this to a {@link Either}.
*
* @param <R> right type
* @param right A supplier of a right value
* @return A new {@link Either.Right} containing the result of {@code right} if this is empty, otherwise
* a new {@link Either.Left} containing this value.
* @throws NullPointerException if {@code right} is null
*/
default <R> Either<T, R> toLeft(Supplier<? extends R> right) {
Objects.requireNonNull(right, "right is null");
return isEmpty() ? Either.right(right.get()) : Either.left(get());
}
/**
* Converts this to a {@link Either}.
*
* @param <R> right type
* @param right An instance of a right value
* @return A new {@link Either.Right} containing the value of {@code right} if this is empty, otherwise
* a new {@link Either.Left} containing this value.
* @throws NullPointerException if {@code right} is null
*/
default <R> Either<T, R> toLeft(R right) {
return isEmpty() ? Either.right(right) : Either.left(get());
}
/**
* Converts this to a {@link List}.
*
* @return A new {@link List}.
*/
default List<T> toList() {
return ValueModule.toTraversable(this, List.empty(), List::of, List::ofAll);
}
/**
* Converts this to a {@link Map}.
*
* @param f A function that maps an element to a key/value pair represented by Tuple2
* @param <K> The key type
* @param <V> The value type
* @return A new {@link HashMap}.
*/
default <K, V> Map<K, V> toMap(Function<? super T, ? extends Tuple2<? extends K, ? extends V>> f) {
Objects.requireNonNull(f, "f is null");
if (isEmpty()) {
return HashMap.empty();
} else if (isSingleValued()) {
return HashMap.of(f.apply(get()));
} else {
return HashMap.ofEntries(Iterator.ofAll(this).map(f));
}
}
/**
* Converts this to an {@link Option}.
*
* @return A new {@link Option}.
*/
default Option<T> toOption() {
if (this instanceof Option) {
return (Option<T>) this;
} else {
return getOption();
}
}
/**
* Converts this to a {@link Queue}.
*
* @return A new {@link Queue}.
*/
default Queue<T> toQueue() {
return ValueModule.toTraversable(this, Queue.empty(), Queue::of, Queue::ofAll);
}
/**
* Converts this to a sorted {@link Queue}.
*
* @return A new {@link Queue}.
*/
default PriorityQueue<T> toSortedQueue(Comparator<? super T> comparator) {
if (this instanceof PriorityQueue) {
return (PriorityQueue<T>) this;
} else {
final PriorityQueue<T> empty = PriorityQueue.empty(comparator);
final Function<T, PriorityQueue<T>> of = value -> PriorityQueue.of(comparator, value);
final Function<Iterable<T>, PriorityQueue<T>> ofAll = values -> PriorityQueue.ofAll(comparator, values);
return ValueModule.toTraversable(this, empty, of, ofAll);
}
}
/**
* Converts this to a {@link Either}.
*
* @param <L> left type
* @param left A supplier of a left value
* @return A new {@link Either.Left} containing the result of {@code left} if this is empty, otherwise
* a new {@link Either.Right} containing this value.
* @throws NullPointerException if {@code left} is null
*/
default <L> Either<L, T> toRight(Supplier<? extends L> left) {
Objects.requireNonNull(left, "left is null");
return isEmpty() ? Either.left(left.get()) : Either.right(get());
}
/**
* Converts this to a {@link Either}.
*
* @param <L> left type
* @param left An instance of a left value
* @return A new {@link Either.Left} containing the value of {@code left} if this is empty, otherwise
* a new {@link Either.Right} containing this value.
* @throws NullPointerException if {@code left} is null
*/
default <L> Either<L, T> toRight(L left) {
return isEmpty() ? Either.left(left) : Either.right(get());
}
/**
* Converts this to a {@link Set}.
*
* @return A new {@link HashSet}.
*/
default Set<T> toSet() {
return ValueModule.toTraversable(this, HashSet.empty(), HashSet::of, HashSet::ofAll);
}
/**
* Converts this to a {@link SortedSet}.
*
* @return A new {@link TreeSet}.
*/
default SortedSet<T> toSortedSet(Comparator<? super T> comparator) {
return ValueModule.toTraversable(this, TreeSet.empty(comparator), value -> TreeSet.of(comparator, value), values -> TreeSet.ofAll(comparator, values));
}
/**
* Converts this to a {@link Stack}.
*
* @return A new {@link List}, which is a {@link Stack}.
*/
default Stack<T> toStack() {
return toList();
}
/**
* Converts this to a {@link Stream}.
*
* @return A new {@link Stream}.
*/
default Stream<T> toStream() {
return ValueModule.toTraversable(this, Stream.empty(), Stream::of, Stream::ofAll);
}
/**
* Converts this to a {@link Try}.
* <p>
* If this value is undefined, i.e. empty, then a new {@code Failure(NoSuchElementException)} is returned,
* otherwise a new {@code Success(value)} is returned.
*
* @return A new {@link Try}.
*/
default Try<T> toTry() {
if (this instanceof Try) {
return (Try<T>) this;
} else {
return Try.of(this::get);
}
}
/**
* Converts this to a {@link Try}.
* <p>
* If this value is undefined, i.e. empty, then a new {@code Failure(ifEmpty.get())} is returned,
* otherwise a new {@code Success(value)} is returned.
*
* @param ifEmpty an exception supplier
* @return A new {@link Try}.
*/
default Try<T> toTry(Supplier<? extends Throwable> ifEmpty) {
Objects.requireNonNull(ifEmpty, "ifEmpty is null");
return isEmpty() ? Try.failure(ifEmpty.get()) : toTry();
}
/**
* Converts this to a {@link Tree}.
*
* @return A new {@link Tree}.
*/
default Tree<T> toTree() {
return ValueModule.toTraversable(this, Tree.empty(), Tree::of, Tree::ofAll);
}
/**
* Converts this to a {@link Vector}.
*
* @return A new {@link Vector}.
*/
default Vector<T> toVector() {
return ValueModule.toTraversable(this, Vector.empty(), Vector::of, Vector::ofAll);
}
// -- Object
/**
* Clarifies that values have a proper equals() method implemented.
* <p>
* See <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#equals-java.lang.Object-">Object.equals(Object)</a>.
*
* @param o An object
* @return true, if this equals o, false otherwise
*/
@Override
boolean equals(Object o);
/**
* Clarifies that values have a proper hashCode() method implemented.
* <p>
* See <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#hashCode--">Object.hashCode()</a>.
*
* @return The hashcode of this object
*/
@Override
int hashCode();
/**
* Clarifies that values have a proper toString() method implemented.
* <p>
* See <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html#toString--">Object.toString()</a>.
*
* @return A String representation of this object
*/
@Override
String toString();
}
interface ValueModule {
static <T extends Traversable<V>, V> T toTraversable(Value<V> value, T empty,
Function<V, T> ofElement,
Function<Iterable<V>, T> ofAll) {
if (value.isEmpty()) {
return empty;
} else if (value.isSingleValued()) {
return ofElement.apply(value.get());
} else {
return ofAll.apply(value);
}
}
static <T extends java.util.Collection<V>, V> T toJavaCollection(Value<V> value, T empty) {
if (!value.isEmpty()) {
if (value.isSingleValued()) {
empty.add(value.get());
} else {
value.forEach(empty::add);
}
}
return empty;
}
}
| 1 | 8,167 | I think it was reversed | vavr-io-vavr | java |
@@ -12,14 +12,10 @@ public class ASTJspDirective extends AbstractJspNode {
*/
private String name;
- public ASTJspDirective(int id) {
+ ASTJspDirective(int id) {
super(id);
}
- public ASTJspDirective(JspParser p, int id) {
- super(p, id);
- }
-
/**
* @return Returns the name.
*/ | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
/* Generated By:JJTree: Do not edit this line. ASTJspDirective.java */
package net.sourceforge.pmd.lang.jsp.ast;
public class ASTJspDirective extends AbstractJspNode {
/**
* Name of the element-tag. Cannot be null.
*/
private String name;
public ASTJspDirective(int id) {
super(id);
}
public ASTJspDirective(JspParser p, int id) {
super(p, id);
}
/**
* @return Returns the name.
*/
public String getName() {
return name;
}
/**
* @param name
* The name to set.
*/
public void setName(String name) {
this.name = name;
}
/**
* Accept the visitor. *
*/
@Override
public Object jjtAccept(JspParserVisitor visitor, Object data) {
return visitor.visit(this, data);
}
}
| 1 | 17,011 | The setter "setName" can be package-private as well | pmd-pmd | java |
@@ -30,7 +30,7 @@ void AdamSolver<Dtype>::ComputeUpdateValue(int param_id, Dtype rate) {
Blob<Dtype>* val_v = this->history_[param_id + update_history_offset].get();
Blob<Dtype>* val_t = this->temp_[param_id].get();
- const int t = this->iter_ + 1;
+ const int t = this->iter_ + 1;
const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) /
(Dtype(1.) - pow(beta1, t));
const int N = net_params[param_id]->count(); | 1 | #include <vector>
#include "caffe/sgd_solvers.hpp"
namespace caffe {
template <typename Dtype>
void AdamSolver<Dtype>::AdamPreSolve() {
// Add the extra history entries for Adam after those from
// SGDSolver::PreSolve
const vector<Blob<Dtype>*>& net_params = this->net_->learnable_params();
for (int i = 0; i < net_params.size(); ++i) {
const vector<int>& shape = net_params[i]->shape();
this->history_.push_back(
shared_ptr<Blob<Dtype> >(new Blob<Dtype>(shape)));
}
}
template <typename Dtype>
void AdamSolver<Dtype>::ComputeUpdateValue(int param_id, Dtype rate) {
const vector<Blob<Dtype>*>& net_params = this->net_->learnable_params();
const vector<float>& net_params_lr = this->net_->params_lr();
Dtype local_rate = rate * net_params_lr[param_id];
const Dtype beta1 = this->param_.momentum();
const Dtype beta2 = this->param_.momentum2();
// we create aliases for convenience
size_t update_history_offset = net_params.size();
Blob<Dtype>* val_m = this->history_[param_id].get();
Blob<Dtype>* val_v = this->history_[param_id + update_history_offset].get();
Blob<Dtype>* val_t = this->temp_[param_id].get();
const int t = this->iter_ + 1;
const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) /
(Dtype(1.) - pow(beta1, t));
const int N = net_params[param_id]->count();
const Dtype eps_hat = this->param_.delta();
switch (Caffe::mode()) {
case Caffe::CPU: {
// update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t
caffe_cpu_axpby(N, Dtype(1)-beta1,
net_params[param_id]->cpu_diff(), beta1,
val_m->mutable_cpu_data());
// update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2
caffe_mul(N,
net_params[param_id]->cpu_diff(),
net_params[param_id]->cpu_diff(),
val_t->mutable_cpu_data());
caffe_cpu_axpby(N, Dtype(1)-beta2,
val_t->cpu_data(), beta2,
val_v->mutable_cpu_data());
// set update
caffe_powx(N,
val_v->cpu_data(), Dtype(0.5),
val_t->mutable_cpu_data());
caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data());
caffe_div(N,
val_m->cpu_data(),
val_t->cpu_data(),
val_t->mutable_cpu_data());
caffe_cpu_scale(N, local_rate*correction,
val_t->cpu_data(),
net_params[param_id]->mutable_cpu_diff());
break;
}
case Caffe::GPU: {
#ifndef CPU_ONLY
// update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t
caffe_gpu_axpby(N, Dtype(1)-beta1,
net_params[param_id]->gpu_diff(), beta1,
val_m->mutable_gpu_data());
// update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2
caffe_gpu_mul(N,
net_params[param_id]->gpu_diff(),
net_params[param_id]->gpu_diff(),
val_t->mutable_gpu_data());
caffe_gpu_axpby(N, Dtype(1)-beta2,
val_t->gpu_data(), beta2,
val_v->mutable_gpu_data());
// set update
caffe_gpu_powx(N,
val_v->gpu_data(), Dtype(0.5),
val_t->mutable_gpu_data());
caffe_gpu_add_scalar(N, eps_hat,
val_t->mutable_gpu_data());
caffe_gpu_div(N,
val_m->gpu_data(),
val_t->gpu_data(),
val_t->mutable_gpu_data());
caffe_gpu_scale(N, local_rate*correction,
val_t->gpu_data(),
net_params[param_id]->mutable_gpu_diff());
#else
NO_GPU;
#endif
break;
}
default:
LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode();
}
}
INSTANTIATE_CLASS(AdamSolver);
REGISTER_SOLVER_CLASS(Adam);
} // namespace caffe
| 1 | 35,899 | Not sure if this is intentional or snuck in through find/replace; while this is a correct style fix (and is welcome in another PR), it should not be done here so that the commit maintains a logical changeset. | BVLC-caffe | cpp |
@@ -117,4 +117,17 @@ BOOST_AUTO_TEST_CASE(test_exceptions) {
BOOST_CHECK_NO_THROW(buf2.write((const uint8_t*)"bar", 3));
}
+#ifndef _WIN32
+// We can't allocate 1 GB of memory in 32-bit environments.
+BOOST_AUTO_TEST_CASE(test_over_two_gb) {
+ TMemoryBuffer buf;
+ std::vector<uint8_t> small_buff(1);
+ std::vector<uint8_t> one_gb(1073741824);
+
+ buf.write(&small_buff[0], small_buff.size());
+ buf.write(&one_gb[0], one_gb.size());
+ BOOST_CHECK_THROW(buf.write(&one_gb[0], one_gb.size()), TTransportException);
+}
+#endif
+
BOOST_AUTO_TEST_SUITE_END() | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <boost/test/auto_unit_test.hpp>
#include <iostream>
#include <climits>
#include <vector>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/stdcxx.h>
#include <thrift/transport/TBufferTransports.h>
#include "gen-cpp/ThriftTest_types.h"
BOOST_AUTO_TEST_SUITE(TMemoryBufferTest)
using apache::thrift::protocol::TBinaryProtocol;
using apache::thrift::transport::TMemoryBuffer;
using apache::thrift::transport::TTransportException;
using apache::thrift::stdcxx::shared_ptr;
using std::cout;
using std::endl;
using std::string;
BOOST_AUTO_TEST_CASE(test_read_write_grow) {
// Added to test the fix for THRIFT-1248
TMemoryBuffer uut;
const int maxSize = 65536;
uint8_t verify[maxSize];
std::vector<uint8_t> buf;
buf.resize(maxSize);
for (uint32_t i = 0; i < maxSize; ++i) {
buf[i] = static_cast<uint8_t>(i);
}
for (uint32_t i = 1; i < maxSize; i *= 2) {
uut.write(&buf[0], i);
}
for (uint32_t i = 1; i < maxSize; i *= 2) {
uut.read(verify, i);
BOOST_CHECK_EQUAL(0, ::memcmp(verify, &buf[0], i));
}
}
BOOST_AUTO_TEST_CASE(test_roundtrip) {
shared_ptr<TMemoryBuffer> strBuffer(new TMemoryBuffer());
shared_ptr<TBinaryProtocol> binaryProtcol(new TBinaryProtocol(strBuffer));
thrift::test::Xtruct a;
a.i32_thing = 10;
a.i64_thing = 30;
a.string_thing = "holla back a";
a.write(binaryProtcol.get());
std::string serialized = strBuffer->getBufferAsString();
shared_ptr<TMemoryBuffer> strBuffer2(new TMemoryBuffer());
shared_ptr<TBinaryProtocol> binaryProtcol2(new TBinaryProtocol(strBuffer2));
strBuffer2->resetBuffer((uint8_t*)serialized.data(), static_cast<uint32_t>(serialized.length()));
thrift::test::Xtruct a2;
a2.read(binaryProtcol2.get());
BOOST_CHECK(a == a2);
}
BOOST_AUTO_TEST_CASE(test_copy) {
string* str1 = new string("abcd1234");
const char* data1 = str1->data();
TMemoryBuffer buf((uint8_t*)str1->data(),
static_cast<uint32_t>(str1->length()),
TMemoryBuffer::COPY);
delete str1;
string* str2 = new string("plsreuse");
bool obj_reuse = (str1 == str2);
bool dat_reuse = (data1 == str2->data());
BOOST_TEST_MESSAGE("Object reuse: " << obj_reuse << " Data reuse: " << dat_reuse
<< ((obj_reuse && dat_reuse) ? " YAY!" : ""));
delete str2;
string str3 = "wxyz", str4 = "6789";
buf.readAppendToString(str3, 4);
buf.readAppendToString(str4, INT_MAX);
BOOST_CHECK(str3 == "wxyzabcd");
BOOST_CHECK(str4 == "67891234");
}
BOOST_AUTO_TEST_CASE(test_exceptions) {
char data[] = "foo\0bar";
TMemoryBuffer buf1((uint8_t*)data, 7, TMemoryBuffer::OBSERVE);
string str = buf1.getBufferAsString();
BOOST_CHECK(str.length() == 7);
buf1.resetBuffer();
BOOST_CHECK_THROW(buf1.write((const uint8_t*)"foo", 3), TTransportException);
TMemoryBuffer buf2((uint8_t*)data, 7, TMemoryBuffer::COPY);
BOOST_CHECK_NO_THROW(buf2.write((const uint8_t*)"bar", 3));
}
BOOST_AUTO_TEST_SUITE_END()
| 1 | 13,392 | This makes me wonder if the TBufferTransport should have a size limit that is configurable, with a default of INT32_MAX, and then the test can make a smaller one like 4KB, and write 4KB and then one byte more, instead of using up 2GB of memory. | apache-thrift | c |
@@ -255,6 +255,8 @@ public class ApiMethodTransformer {
methodViewBuilder.stubName(namer.getStubName(context.getTargetInterface()));
methodViewBuilder.settingsGetterName(namer.getSettingsFunctionName(context.getMethod()));
methodViewBuilder.callableName(context.getNamer().getCallableName(context.getMethod()));
+ methodViewBuilder.isRequestStreaming(context.getMethod().getRequestStreaming());
+ methodViewBuilder.isResponseStreaming(context.getMethod().getResponseStreaming());
}
private void setListMethodFields( | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer;
import com.google.api.codegen.CollectionConfig;
import com.google.api.codegen.MethodConfig;
import com.google.api.codegen.PageStreamingConfig;
import com.google.api.codegen.ServiceMessages;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.viewmodel.ApiMethodDocView;
import com.google.api.codegen.viewmodel.ApiMethodType;
import com.google.api.codegen.viewmodel.CallableMethodDetailView;
import com.google.api.codegen.viewmodel.DynamicLangDefaultableParamView;
import com.google.api.codegen.viewmodel.ListMethodDetailView;
import com.google.api.codegen.viewmodel.MapParamDocView;
import com.google.api.codegen.viewmodel.OptionalArrayMethodView;
import com.google.api.codegen.viewmodel.ParamDocView;
import com.google.api.codegen.viewmodel.PathTemplateCheckView;
import com.google.api.codegen.viewmodel.RequestObjectMethodDetailView;
import com.google.api.codegen.viewmodel.RequestObjectParamView;
import com.google.api.codegen.viewmodel.SimpleParamDocView;
import com.google.api.codegen.viewmodel.StaticLangApiMethodView;
import com.google.api.codegen.viewmodel.StaticLangApiMethodView.Builder;
import com.google.api.codegen.viewmodel.UnpagedListCallableMethodDetailView;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.TypeRef;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.protobuf.DescriptorProtos.FieldDescriptorProto.Type;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/** ApiMethodTransformer generates view objects from method definitions. */
public class ApiMethodTransformer {
private InitCodeTransformer initCodeTransformer;
public ApiMethodTransformer() {
this.initCodeTransformer = new InitCodeTransformer();
}
public StaticLangApiMethodView generatePagedFlattenedMethod(
MethodTransformerContext context, ImmutableList<Field> fields) {
return generatePagedFlattenedMethod(
context, fields, Collections.<ParamWithSimpleDoc>emptyList());
}
public StaticLangApiMethodView generatePagedFlattenedMethod(
MethodTransformerContext context,
ImmutableList<Field> fields,
List<ParamWithSimpleDoc> additionalParams) {
StaticLangApiMethodView.Builder methodViewBuilder = StaticLangApiMethodView.newBuilder();
setCommonFields(context, methodViewBuilder);
methodViewBuilder.name(context.getNamer().getApiMethodName(context.getMethod()));
methodViewBuilder.exampleName(
context.getNamer().getApiMethodExampleName(context.getInterface(), context.getMethod()));
setListMethodFields(context, methodViewBuilder);
methodViewBuilder.isPageStreaming(true);
setFlattenedMethodFields(
context, fields, additionalParams, Synchronicity.Sync, methodViewBuilder);
return methodViewBuilder.type(ApiMethodType.PagedFlattenedMethod).build();
}
public StaticLangApiMethodView generatePagedFlattenedAsyncMethod(
MethodTransformerContext context,
ImmutableList<Field> fields,
List<ParamWithSimpleDoc> additionalParams) {
StaticLangApiMethodView.Builder methodViewBuilder = StaticLangApiMethodView.newBuilder();
setCommonFields(context, methodViewBuilder);
methodViewBuilder.name(context.getNamer().getAsyncApiMethodName(context.getMethod()));
methodViewBuilder.exampleName(
context.getNamer().getAsyncApiMethodExampleName(context.getMethod()));
methodViewBuilder.isPageStreaming(true);
setListMethodFields(context, methodViewBuilder);
setFlattenedMethodFields(
context, fields, additionalParams, Synchronicity.Async, methodViewBuilder);
return methodViewBuilder.type(ApiMethodType.PagedFlattenedAsyncMethod).build();
}
public StaticLangApiMethodView generatePagedRequestObjectMethod(
MethodTransformerContext context) {
SurfaceNamer namer = context.getNamer();
StaticLangApiMethodView.Builder methodViewBuilder = StaticLangApiMethodView.newBuilder();
setCommonFields(context, methodViewBuilder);
methodViewBuilder.name(namer.getApiMethodName(context.getMethod()));
methodViewBuilder.exampleName(
context.getNamer().getApiMethodExampleName(context.getInterface(), context.getMethod()));
setListMethodFields(context, methodViewBuilder);
setRequestObjectMethodFields(
context, namer.getPagedCallableMethodName(context.getMethod()), methodViewBuilder);
methodViewBuilder.isPageStreaming(true);
return methodViewBuilder.type(ApiMethodType.PagedRequestObjectMethod).build();
}
public StaticLangApiMethodView generatePagedCallableMethod(MethodTransformerContext context) {
SurfaceNamer namer = context.getNamer();
StaticLangApiMethodView.Builder methodViewBuilder = StaticLangApiMethodView.newBuilder();
setCommonFields(context, methodViewBuilder);
methodViewBuilder.name(namer.getPagedCallableMethodName(context.getMethod()));
methodViewBuilder.exampleName(
context
.getNamer()
.getPagedCallableMethodExampleName(context.getInterface(), context.getMethod()));
setListMethodFields(context, methodViewBuilder);
setCallableMethodFields(
context, namer.getPagedCallableName(context.getMethod()), methodViewBuilder);
methodViewBuilder.isPageStreaming(true);
return methodViewBuilder.type(ApiMethodType.PagedCallableMethod).build();
}
public StaticLangApiMethodView generateUnpagedListCallableMethod(
MethodTransformerContext context) {
SurfaceNamer namer = context.getNamer();
StaticLangApiMethodView.Builder methodViewBuilder = StaticLangApiMethodView.newBuilder();
setCommonFields(context, methodViewBuilder);
methodViewBuilder.name(namer.getCallableMethodName(context.getMethod()));
methodViewBuilder.exampleName(
context
.getNamer()
.getCallableMethodExampleName(context.getInterface(), context.getMethod()));
setListMethodFields(context, methodViewBuilder);
setCallableMethodFields(context, namer.getCallableName(context.getMethod()), methodViewBuilder);
String getResourceListCallName =
namer.getGetResourceListCallName(
context.getMethodConfig().getPageStreaming().getResourcesField());
UnpagedListCallableMethodDetailView unpagedListCallableDetails =
UnpagedListCallableMethodDetailView.newBuilder()
.resourceListGetFunction(getResourceListCallName)
.build();
methodViewBuilder.unpagedListCallableMethod(unpagedListCallableDetails);
methodViewBuilder.responseTypeName(
context.getTypeTable().getAndSaveNicknameFor(context.getMethod().getOutputType()));
methodViewBuilder.isPageStreaming(false);
return methodViewBuilder.type(ApiMethodType.UnpagedListCallableMethod).build();
}
public StaticLangApiMethodView generateFlattenedAsyncMethod(
MethodTransformerContext context,
ImmutableList<Field> fields,
List<ParamWithSimpleDoc> additionalParams,
ApiMethodType type) {
StaticLangApiMethodView.Builder methodViewBuilder = StaticLangApiMethodView.newBuilder();
setCommonFields(context, methodViewBuilder);
methodViewBuilder.name(context.getNamer().getAsyncApiMethodName(context.getMethod()));
methodViewBuilder.callableName(context.getNamer().getCallableName(context.getMethod()));
setFlattenedMethodFields(
context, fields, additionalParams, Synchronicity.Async, methodViewBuilder);
setStaticLangReturnFields(context, Synchronicity.Async, methodViewBuilder);
methodViewBuilder.isPageStreaming(false);
methodViewBuilder.exampleName(
context
.getNamer()
.getCallableMethodExampleName(context.getInterface(), context.getMethod()));
return methodViewBuilder.type(type).build();
}
public StaticLangApiMethodView generateFlattenedMethod(
MethodTransformerContext context, ImmutableList<Field> fields) {
return generateFlattenedMethod(context, fields, Collections.<ParamWithSimpleDoc>emptyList());
}
public StaticLangApiMethodView generateFlattenedMethod(
MethodTransformerContext context,
ImmutableList<Field> fields,
List<ParamWithSimpleDoc> additionalParams) {
StaticLangApiMethodView.Builder methodViewBuilder = StaticLangApiMethodView.newBuilder();
setCommonFields(context, methodViewBuilder);
methodViewBuilder.name(context.getNamer().getApiMethodName(context.getMethod()));
methodViewBuilder.exampleName(
context.getNamer().getApiMethodExampleName(context.getInterface(), context.getMethod()));
methodViewBuilder.isPageStreaming(false);
methodViewBuilder.callableName(context.getNamer().getCallableName(context.getMethod()));
setFlattenedMethodFields(
context, fields, additionalParams, Synchronicity.Sync, methodViewBuilder);
setStaticLangReturnFields(context, Synchronicity.Sync, methodViewBuilder);
return methodViewBuilder.type(ApiMethodType.FlattenedMethod).build();
}
public StaticLangApiMethodView generateRequestObjectMethod(MethodTransformerContext context) {
SurfaceNamer namer = context.getNamer();
StaticLangApiMethodView.Builder methodViewBuilder = StaticLangApiMethodView.newBuilder();
setCommonFields(context, methodViewBuilder);
methodViewBuilder.name(namer.getApiMethodName(context.getMethod()));
methodViewBuilder.exampleName(
context.getNamer().getApiMethodExampleName(context.getInterface(), context.getMethod()));
setRequestObjectMethodFields(
context, namer.getCallableMethodName(context.getMethod()), methodViewBuilder);
methodViewBuilder.isPageStreaming(false);
setStaticLangReturnFields(context, Synchronicity.Sync, methodViewBuilder);
return methodViewBuilder.type(ApiMethodType.RequestObjectMethod).build();
}
public StaticLangApiMethodView generateCallableMethod(MethodTransformerContext context) {
SurfaceNamer namer = context.getNamer();
StaticLangApiMethodView.Builder methodViewBuilder = StaticLangApiMethodView.newBuilder();
setCommonFields(context, methodViewBuilder);
methodViewBuilder.name(namer.getCallableMethodName(context.getMethod()));
methodViewBuilder.exampleName(
context
.getNamer()
.getCallableMethodExampleName(context.getInterface(), context.getMethod()));
setCallableMethodFields(context, namer.getCallableName(context.getMethod()), methodViewBuilder);
methodViewBuilder.responseTypeName(
context.getTypeTable().getAndSaveNicknameFor(context.getMethod().getOutputType()));
methodViewBuilder.hasReturnValue(
!ServiceMessages.s_isEmptyType(context.getMethod().getOutputType()));
methodViewBuilder.isPageStreaming(false);
return methodViewBuilder.type(ApiMethodType.CallableMethod).build();
}
private void setCommonFields(
MethodTransformerContext context, StaticLangApiMethodView.Builder methodViewBuilder) {
SurfaceNamer namer = context.getNamer();
String requestTypeName =
context.getTypeTable().getAndSaveNicknameFor(context.getMethod().getInputType());
methodViewBuilder.apiRequestTypeName(requestTypeName);
methodViewBuilder.apiRequestTypeConstructor(namer.getTypeConstructor(requestTypeName));
methodViewBuilder.apiClassName(namer.getApiWrapperClassName(context.getInterface()));
methodViewBuilder.apiVariableName(namer.getApiWrapperVariableName(context.getInterface()));
methodViewBuilder.stubName(namer.getStubName(context.getTargetInterface()));
methodViewBuilder.settingsGetterName(namer.getSettingsFunctionName(context.getMethod()));
methodViewBuilder.callableName(context.getNamer().getCallableName(context.getMethod()));
}
private void setListMethodFields(
MethodTransformerContext context, StaticLangApiMethodView.Builder methodViewBuilder) {
ModelTypeTable typeTable = context.getTypeTable();
SurfaceNamer namer = context.getNamer();
PageStreamingConfig pageStreaming = context.getMethodConfig().getPageStreaming();
String requestTypeName = typeTable.getAndSaveNicknameFor(context.getMethod().getInputType());
String responseTypeName = typeTable.getAndSaveNicknameFor(context.getMethod().getOutputType());
Field resourceField = pageStreaming.getResourcesField();
String resourceTypeName =
context
.getNamer()
.getAndSaveElementFieldTypeName(
context.getFeatureConfig(), context.getTypeTable(), resourceField);
String resourceFieldName = context.getNamer().getFieldName(pageStreaming.getResourcesField());
String resourceFieldGetFunctionName =
namer.getFieldGetFunctionName(context.getFeatureConfig(), resourceField);
methodViewBuilder.listMethod(
ListMethodDetailView.newBuilder()
.requestTypeName(requestTypeName)
.responseTypeName(responseTypeName)
.resourceTypeName(resourceTypeName)
.resourceFieldName(resourceFieldName)
.resourcesFieldGetFunction(resourceFieldGetFunctionName)
.responseObjectTypeName(
context.getTypeTable().getAndSaveNicknameFor(context.getMethod().getOutputType()))
.build());
methodViewBuilder.responseTypeName(
context
.getNamer()
.getAndSavePagedResponseTypeName(
context.getFeatureConfig(),
context.getTypeTable(),
context.getMethod().getInputType(),
context.getMethod().getOutputType(),
resourceField));
methodViewBuilder.hasReturnValue(true);
}
private void setFlattenedMethodFields(
MethodTransformerContext context,
ImmutableList<Field> fields,
List<ParamWithSimpleDoc> additionalParams,
Synchronicity synchronicity,
StaticLangApiMethodView.Builder methodViewBuilder) {
SurfaceNamer namer = context.getNamer();
methodViewBuilder.initCode(
initCodeTransformer.generateInitCode(context.cloneWithEmptyTypeTable(), fields));
methodViewBuilder.doc(
ApiMethodDocView.newBuilder()
.mainDocLines(namer.getDocLines(context.getMethod()))
.paramDocs(getMethodParamDocs(context, fields, additionalParams))
.throwsDocLines(namer.getThrowsDocLines())
.returnsDocLines(
namer.getReturnDocLines(
context.getSurfaceTransformerContext(),
context.getMethodConfig(),
synchronicity))
.build());
List<RequestObjectParamView> params = new ArrayList<>();
for (Field field : fields) {
params.add(generateRequestObjectParam(context, field));
}
methodViewBuilder.forwardingMethodParams(params);
List<RequestObjectParamView> nonforwardingParams = new ArrayList<>(params);
nonforwardingParams.addAll(ParamWithSimpleDoc.asRequestObjectParamViews(additionalParams));
methodViewBuilder.methodParams(nonforwardingParams);
methodViewBuilder.requestObjectParams(params);
methodViewBuilder.pathTemplateChecks(generatePathTemplateChecks(context, fields));
}
private void setRequestObjectMethodFields(
MethodTransformerContext context,
String callableMethodName,
StaticLangApiMethodView.Builder methodViewBuilder) {
SurfaceNamer namer = context.getNamer();
methodViewBuilder.doc(
ApiMethodDocView.newBuilder()
.mainDocLines(namer.getDocLines(context.getMethod()))
.paramDocs(
Arrays.<ParamDocView>asList(
getRequestObjectParamDoc(context, context.getMethod().getInputType())))
.throwsDocLines(namer.getThrowsDocLines())
.build());
methodViewBuilder.initCode(
initCodeTransformer.generateRequestObjectInitCode(context.cloneWithEmptyTypeTable()));
methodViewBuilder.methodParams(new ArrayList<RequestObjectParamView>());
methodViewBuilder.requestObjectParams(new ArrayList<RequestObjectParamView>());
methodViewBuilder.pathTemplateChecks(new ArrayList<PathTemplateCheckView>());
RequestObjectMethodDetailView.Builder detailBuilder =
RequestObjectMethodDetailView.newBuilder();
if (context.getMethodConfig().hasRequestObjectMethod()) {
detailBuilder.accessModifier(context.getNamer().getPublicAccessModifier());
} else {
detailBuilder.accessModifier(context.getNamer().getPrivateAccessModifier());
}
detailBuilder.callableMethodName(callableMethodName);
methodViewBuilder.requestObjectMethod(detailBuilder.build());
}
private void setCallableMethodFields(
MethodTransformerContext context, String callableName, Builder methodViewBuilder) {
methodViewBuilder.doc(
ApiMethodDocView.newBuilder()
.mainDocLines(context.getNamer().getDocLines(context.getMethod()))
.paramDocs(new ArrayList<ParamDocView>())
.throwsDocLines(new ArrayList<String>())
.build());
methodViewBuilder.initCode(
initCodeTransformer.generateRequestObjectInitCode(context.cloneWithEmptyTypeTable()));
methodViewBuilder.methodParams(new ArrayList<RequestObjectParamView>());
methodViewBuilder.requestObjectParams(new ArrayList<RequestObjectParamView>());
methodViewBuilder.pathTemplateChecks(new ArrayList<PathTemplateCheckView>());
String genericAwareResponseTypeFullName =
context.getNamer().getGenericAwareResponseTypeName(context.getMethod().getOutputType());
String genericAwareResponseType =
context.getTypeTable().getAndSaveNicknameFor(genericAwareResponseTypeFullName);
methodViewBuilder.callableMethod(
CallableMethodDetailView.newBuilder()
.genericAwareResponseType(genericAwareResponseType)
.callableName(callableName)
.build());
}
private void setStaticLangReturnFields(
MethodTransformerContext context,
Synchronicity synchronicity,
StaticLangApiMethodView.Builder methodViewBuilder) {
SurfaceNamer namer = context.getNamer();
String syncReturnTypeFullName =
namer.getStaticLangReturnTypeName(context.getMethod(), context.getMethodConfig());
String syncNickname = context.getTypeTable().getAndSaveNicknameFor(syncReturnTypeFullName);
switch (synchronicity) {
case Async:
String asyncReturnTypeFullName =
namer.getStaticLangAsyncReturnTypeName(context.getMethod(), context.getMethodConfig());
String asyncNickname =
context.getTypeTable().getAndSaveNicknameFor(asyncReturnTypeFullName);
methodViewBuilder.responseTypeName(asyncNickname);
break;
case Sync:
methodViewBuilder.responseTypeName(syncNickname);
break;
}
methodViewBuilder.hasReturnValue(
!ServiceMessages.s_isEmptyType(context.getMethod().getOutputType()));
}
private List<PathTemplateCheckView> generatePathTemplateChecks(
MethodTransformerContext context, ImmutableList<Field> fields) {
List<PathTemplateCheckView> pathTemplateChecks = new ArrayList<>();
for (Field field : fields) {
if (context.getFeatureConfig().useResourceNameFormatOption(field)) {
// Don't generate a path template check when using a ResourceName type instead of a string
continue;
}
ImmutableMap<String, String> fieldNamePatterns =
context.getMethodConfig().getFieldNamePatterns();
String entityName = fieldNamePatterns.get(field.getSimpleName());
if (entityName != null) {
CollectionConfig collectionConfig = context.getCollectionConfig(entityName);
if (collectionConfig == null) {
throw new IllegalStateException("No collection config with id '" + entityName + "'");
}
PathTemplateCheckView.Builder check = PathTemplateCheckView.newBuilder();
check.pathTemplateName(
context.getNamer().getPathTemplateName(context.getInterface(), collectionConfig));
check.paramName(context.getNamer().getVariableName(field));
check.allowEmptyString(shouldAllowEmpty(context, field));
check.validationMessageContext(context.getNamer().getApiMethodName(context.getMethod()));
pathTemplateChecks.add(check.build());
}
}
return pathTemplateChecks;
}
private boolean shouldAllowEmpty(MethodTransformerContext context, Field field) {
for (Field requiredField : context.getMethodConfig().getRequiredFields()) {
if (requiredField.equals(field)) {
return false;
}
}
return true;
}
public OptionalArrayMethodView generateDynamicLangApiMethod(MethodTransformerContext context) {
SurfaceNamer namer = context.getNamer();
OptionalArrayMethodView.Builder apiMethod = OptionalArrayMethodView.newBuilder();
if (context.getMethodConfig().isPageStreaming()) {
apiMethod.type(ApiMethodType.PagedOptionalArrayMethod);
} else {
apiMethod.type(ApiMethodType.OptionalArrayMethod);
}
apiMethod.apiClassName(namer.getApiWrapperClassName(context.getInterface()));
apiMethod.apiVariableName(namer.getApiWrapperVariableName(context.getInterface()));
apiMethod.apiModuleName(namer.getApiWrapperModuleName(context.getInterface()));
apiMethod.initCode(
initCodeTransformer.generateInitCode(
context.cloneWithEmptyTypeTable(), context.getMethodConfig().getRequiredFields()));
apiMethod.doc(generateOptionalArrayMethodDoc(context));
apiMethod.name(namer.getApiMethodName(context.getMethod()));
apiMethod.requestTypeName(
context.getTypeTable().getAndSaveNicknameFor(context.getMethod().getInputType()));
apiMethod.hasReturnValue(!ServiceMessages.s_isEmptyType(context.getMethod().getOutputType()));
apiMethod.key(namer.getMethodKey(context.getMethod()));
apiMethod.grpcMethodName(namer.getGrpcMethodName(context.getMethod()));
apiMethod.stubName(namer.getStubName(context.getTargetInterface()));
apiMethod.methodParams(generateOptionalArrayMethodParams(context));
apiMethod.requiredRequestObjectParams(
generateRequestObjectParams(context, context.getMethodConfig().getRequiredFields()));
apiMethod.optionalRequestObjectParams(
generateRequestObjectParams(context, context.getMethodConfig().getOptionalFields()));
return apiMethod.build();
}
private ApiMethodDocView generateOptionalArrayMethodDoc(MethodTransformerContext context) {
ApiMethodDocView.Builder docBuilder = ApiMethodDocView.newBuilder();
docBuilder.mainDocLines(context.getNamer().getDocLines(context.getMethod()));
List<ParamDocView> paramDocs =
getMethodParamDocs(
context,
context.getMethodConfig().getRequiredFields(),
Collections.<ParamWithSimpleDoc>emptyList());
paramDocs.add(getOptionalArrayParamDoc(context, context.getMethodConfig().getOptionalFields()));
docBuilder.paramDocs(paramDocs);
docBuilder.returnTypeName(
context
.getNamer()
.getDynamicLangReturnTypeName(context.getMethod(), context.getMethodConfig()));
docBuilder.throwsDocLines(new ArrayList<String>());
return docBuilder.build();
}
private List<DynamicLangDefaultableParamView> generateOptionalArrayMethodParams(
MethodTransformerContext context) {
List<DynamicLangDefaultableParamView> methodParams =
generateDefaultableParams(context, context.getMethodConfig().getRequiredFields());
// TODO create a map TypeRef here instead of an array
// (not done yet because array is sufficient for PHP, and maps are more complex to construct)
TypeRef arrayType = TypeRef.fromPrimitiveName("string").makeRepeated();
DynamicLangDefaultableParamView.Builder optionalArgs =
DynamicLangDefaultableParamView.newBuilder();
optionalArgs.name(context.getNamer().localVarName(Name.from("optional", "args")));
optionalArgs.defaultValue(context.getTypeTable().getZeroValueAndSaveNicknameFor(arrayType));
methodParams.add(optionalArgs.build());
return methodParams;
}
private List<DynamicLangDefaultableParamView> generateDefaultableParams(
MethodTransformerContext context, Iterable<Field> fields) {
List<DynamicLangDefaultableParamView> methodParams = new ArrayList<>();
    for (Field field : fields) {
DynamicLangDefaultableParamView param =
DynamicLangDefaultableParamView.newBuilder()
.name(context.getNamer().getVariableName(field))
.defaultValue("")
.build();
methodParams.add(param);
}
return methodParams;
}
private List<RequestObjectParamView> generateRequestObjectParams(
MethodTransformerContext context, Iterable<Field> fields) {
List<RequestObjectParamView> params = new ArrayList<>();
for (Field field : fields) {
params.add(generateRequestObjectParam(context, field));
}
return params;
}
private RequestObjectParamView generateRequestObjectParam(
MethodTransformerContext context, Field field) {
SurfaceNamer namer = context.getNamer();
FeatureConfig featureConfig = context.getFeatureConfig();
ModelTypeTable typeTable = context.getTypeTable();
String typeName =
namer.getNotImplementedString("ApiMethodTransformer.generateRequestObjectParam - typeName");
String elementTypeName =
namer.getNotImplementedString(
"ApiMethodTransformer.generateRequestObjectParam - elementTypeName");
if (namer.shouldImportRequestObjectParamType(field)) {
typeName = namer.getAndSaveFieldTypeName(featureConfig, typeTable, field);
}
if (namer.shouldImportRequestObjectParamElementType(field)) {
elementTypeName = namer.getAndSaveElementFieldTypeName(featureConfig, typeTable, field);
}
String setCallName = namer.getFieldSetFunctionName(featureConfig, field);
RequestObjectParamView.Builder param = RequestObjectParamView.newBuilder();
param.name(namer.getVariableName(field));
param.nameAsMethodName(namer.getFieldAsMethodName(field));
param.typeName(typeName);
param.elementTypeName(elementTypeName);
param.setCallName(setCallName);
param.isMap(field.getType().isMap());
param.isArray(!field.getType().isMap() && field.getType().isRepeated());
return param.build();
}
private List<ParamDocView> getMethodParamDocs(
MethodTransformerContext context,
Iterable<Field> fields,
List<ParamWithSimpleDoc> additionalParamDocs) {
List<ParamDocView> allDocs = new ArrayList<>();
for (Field field : fields) {
SimpleParamDocView.Builder paramDoc = SimpleParamDocView.newBuilder();
paramDoc.paramName(context.getNamer().getVariableName(field));
paramDoc.typeName(context.getTypeTable().getAndSaveNicknameFor(field.getType()));
List<String> docLines = null;
MethodConfig methodConfig = context.getMethodConfig();
if (methodConfig.isPageStreaming()
&& methodConfig.getPageStreaming().hasPageSizeField()
&& field.equals(methodConfig.getPageStreaming().getPageSizeField())) {
docLines =
Arrays.asList(
new String[] {
"The maximum number of resources contained in the underlying API",
"response. The API may return fewer values in a page, even if",
"there are additional values to be retrieved."
});
} else if (methodConfig.isPageStreaming()
&& field.equals(methodConfig.getPageStreaming().getRequestTokenField())) {
docLines =
Arrays.asList(
new String[] {
"A page token is used to specify a page of values to be returned.",
"If no page token is specified (the default), the first page",
"of values will be returned. Any page token used here must have",
"been generated by a previous call to the API."
});
} else {
docLines = context.getNamer().getDocLines(field);
}
paramDoc.lines(docLines);
paramDoc.firstLine(docLines.get(0));
paramDoc.remainingLines(docLines.subList(1, docLines.size()));
allDocs.add(paramDoc.build());
}
allDocs.addAll(ParamWithSimpleDoc.asParamDocViews(additionalParamDocs));
return allDocs;
}
public SimpleParamDocView getRequestObjectParamDoc(
MethodTransformerContext context, TypeRef typeRef) {
return SimpleParamDocView.newBuilder()
.paramName("request")
.typeName(context.getTypeTable().getAndSaveNicknameFor(typeRef))
.firstLine("The request object containing all of the parameters for the API call.")
.remainingLines(Arrays.<String>asList())
.build();
}
private ParamDocView getOptionalArrayParamDoc(
MethodTransformerContext context, Iterable<Field> fields) {
MapParamDocView.Builder paramDoc = MapParamDocView.newBuilder();
Name optionalArgsName = Name.from("optional", "args");
paramDoc.paramName(context.getNamer().localVarName(optionalArgsName));
paramDoc.typeName(context.getNamer().getOptionalArrayTypeName());
List<String> docLines = Arrays.asList("Optional.");
paramDoc.firstLine(docLines.get(0));
paramDoc.remainingLines(docLines.subList(1, docLines.size()));
paramDoc.arrayKeyDocs(
ImmutableList.<ParamDocView>builder()
.addAll(
getMethodParamDocs(context, fields, Collections.<ParamWithSimpleDoc>emptyList()))
.addAll(getCallSettingsParamDocList(context))
.build());
return paramDoc.build();
}
private List<ParamDocView> getCallSettingsParamDocList(MethodTransformerContext context) {
List<ParamDocView> arrayKeyDocs = new ArrayList<>();
SimpleParamDocView.Builder retrySettingsDoc = SimpleParamDocView.newBuilder();
retrySettingsDoc.typeName(context.getNamer().getRetrySettingsTypeName());
Name retrySettingsName = Name.from("retry", "settings");
Name timeoutMillisName = Name.from("timeout", "millis");
retrySettingsDoc.paramName(context.getNamer().localVarName(retrySettingsName));
// TODO figure out a reliable way to line-wrap comments across all languages
// instead of encoding it in the transformer
String retrySettingsDocText =
String.format(
"Retry settings to use for this call. If present, then\n%s is ignored.",
context.getNamer().varReference(timeoutMillisName));
List<String> retrySettingsDocLines = context.getNamer().getDocLines(retrySettingsDocText);
retrySettingsDoc.firstLine(retrySettingsDocLines.get(0));
retrySettingsDoc.remainingLines(retrySettingsDocLines.subList(1, retrySettingsDocLines.size()));
arrayKeyDocs.add(retrySettingsDoc.build());
SimpleParamDocView.Builder timeoutDoc = SimpleParamDocView.newBuilder();
timeoutDoc.typeName(context.getTypeTable().getAndSaveNicknameFor(TypeRef.of(Type.TYPE_INT32)));
timeoutDoc.paramName(context.getNamer().localVarName(timeoutMillisName));
// TODO figure out a reliable way to line-wrap comments across all languages
// instead of encoding it in the transformer
String timeoutMillisDocText =
String.format(
"Timeout to use for this call. Only used if %s\nis not set.",
context.getNamer().varReference(retrySettingsName));
List<String> timeoutMillisDocLines = context.getNamer().getDocLines(timeoutMillisDocText);
timeoutDoc.firstLine(timeoutMillisDocLines.get(0));
timeoutDoc.remainingLines(timeoutMillisDocLines.subList(1, timeoutMillisDocLines.size()));
arrayKeyDocs.add(timeoutDoc.build());
return arrayKeyDocs;
}
}
| 1 | 18,252 | If you use an enum for the streaming type, you wouldn't need to add each boolean as a separate attribute. | googleapis-gapic-generator | java |
@@ -98,6 +98,7 @@ from invenio.utils.serializers import serialize_via_marshal, \
deserialize_via_marshal
from sqlalchemy.exc import DatabaseError
+from MySQLdb import IntegrityError
from .engine_utils import get_index_id_from_index_name
| 1 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009,
# 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
"""
BibIndex indexing engine implementation.
See bibindex executable for entry point.
"""
__revision__ = "$Id$"
import re
import sys
import time
import fnmatch
import inspect
from datetime import datetime
from six import iteritems
from invenio.config import CFG_SOLR_URL
from invenio.legacy.bibindex.engine_config import CFG_MAX_MYSQL_THREADS, \
CFG_MYSQL_THREAD_TIMEOUT, \
CFG_CHECK_MYSQL_THREADS, \
CFG_BIBINDEX_INDEX_TABLE_TYPE, \
CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR, \
CFG_BIBINDEX_UPDATE_MESSAGE, \
CFG_BIBINDEX_UPDATE_MODE, \
CFG_BIBINDEX_TOKENIZER_TYPE, \
CFG_BIBINDEX_WASH_INDEX_TERMS, \
CFG_BIBINDEX_SPECIAL_TAGS
from invenio.legacy.bibauthority.config import \
CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC
from invenio.legacy.bibauthority.engine import get_index_strings_by_control_no,\
get_control_nos_from_recID
from invenio.legacy.search_engine import perform_request_search, \
get_synonym_terms, \
search_pattern
from invenio.legacy.dbquery import run_sql, wash_table_column_name
from invenio.legacy.bibindex.engine_washer import wash_index_term
from invenio.legacy.bibsched.bibtask import task_init, write_message, get_datetime, \
task_set_option, task_get_option, task_get_task_param, \
task_update_progress, task_sleep_now_if_required
from intbitset import intbitset
from invenio.ext.logging import register_exception
from invenio.legacy.bibrank.adminlib import get_def_name
from invenio.legacy.miscutil.solrutils_bibindex_indexer import solr_commit
from invenio.modules.indexer.tokenizers.BibIndexJournalTokenizer import \
CFG_JOURNAL_TAG, \
CFG_JOURNAL_PUBINFO_STANDARD_FORM, \
CFG_JOURNAL_PUBINFO_STANDARD_FORM_REGEXP_CHECK
from invenio.legacy.bibindex.termcollectors import TermCollector
from invenio.legacy.bibindex.engine_utils import load_tokenizers, \
get_all_index_names_and_column_values, \
get_index_tags, \
get_field_tags, \
get_marc_tag_indexes, \
get_nonmarc_tag_indexes, \
get_all_indexes, \
get_index_virtual_indexes, \
get_virtual_index_building_blocks, \
run_sql_drop_silently, \
get_min_last_updated, \
remove_inexistent_indexes, \
get_all_synonym_knowledge_bases, \
get_index_remove_stopwords, \
get_index_remove_html_markup, \
get_index_remove_latex_markup, \
filter_for_virtual_indexes, \
get_records_range_for_index, \
make_prefix, \
list_union, \
recognize_marc_tag
from invenio.modules.indexer.cache import get_index_stemming_language
from invenio.modules.records.api import get_record
from invenio.utils.memoise import Memoise
from invenio.legacy.bibindex.termcollectors import \
TermCollector, \
NonmarcTermCollector
from invenio.utils.serializers import serialize_via_marshal, \
deserialize_via_marshal
from sqlalchemy.exc import DatabaseError
from .engine_utils import get_index_id_from_index_name
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
# precompile some often-used regexp for speed reasons:
re_subfields = re.compile('\$\$\w')
re_datetime_shift = re.compile("([-\+]{0,1})([\d]+)([dhms])")
re_prefix = re.compile('__[a-zA-Z1-9]*__')
nb_char_in_line = 50 # for verbose pretty printing
chunksize = 1000 # default size of chunks that the records will be treated by
base_process_size = 4500 # process base size
_last_word_table = None
_TOKENIZERS = load_tokenizers()
def list_unique(_list):
"""Returns a _list with duplicates removed."""
_dict = {}
for e in _list:
_dict[e] = 1
return _dict.keys()
# safety function for killing slow DB threads:
def kill_sleepy_mysql_threads(max_threads=CFG_MAX_MYSQL_THREADS,
thread_timeout=CFG_MYSQL_THREAD_TIMEOUT):
"""Check the number of DB threads and if there are more than
       MAX_THREADS of them, kill all threads that are in a sleeping
state for more than THREAD_TIMEOUT seconds. (This is useful
       for working around the max_connection problem that appears
during indexation in some not-yet-understood cases.) If some
threads are to be killed, write info into the log file.
"""
res = run_sql("SHOW FULL PROCESSLIST")
if len(res) > max_threads:
for row in res:
r_id, dummy, dummy, dummy, r_command, r_time, dummy, dummy = row
if r_command == "Sleep" and int(r_time) > thread_timeout:
run_sql("KILL %s", (r_id, ))
write_message("WARNING: too many DB threads, " + \
"killing thread %s" % r_id, verbose=1)
return
def get_associated_subfield_value(recID, tag, value, associated_subfield_code):
"""Return list of ASSOCIATED_SUBFIELD_CODE, if exists, for record
RECID and TAG of value VALUE. Used by fulltext indexer only.
Note: TAG must be 6 characters long (tag+ind1+ind2+sfcode),
    otherwise an empty string is returned.
FIXME: what if many tag values have the same value but different
associated_subfield_code? Better use bibrecord library for this.
"""
out = ""
if len(tag) != 6:
return out
bibXXx = "bib" + tag[0] + tag[1] + "x"
bibrec_bibXXx = "bibrec_" + bibXXx
query = """SELECT bb.field_number, b.tag, b.value FROM %s AS b, %s AS bb
WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id AND tag LIKE
%%s%%""" % (bibXXx, bibrec_bibXXx)
res = run_sql(query, (recID, tag[:-1]))
field_number = -1
for row in res:
if row[1] == tag and row[2] == value:
field_number = row[0]
if field_number > 0:
for row in res:
if row[0] == field_number and row[1] == tag[:-1] + associated_subfield_code:
out = row[2]
break
return out
def get_author_canonical_ids_for_recid(recID):
"""
Return list of author canonical IDs (e.g. `J.Ellis.1') for the
given record. Done by consulting BibAuthorID module.
"""
return []
def swap_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
"""Atomically swap reindexed temporary table with the original one.
Delete the now-old one."""
write_message("Putting new tmp index tables " + \
"for id %s into production" % index_id)
run_sql(
"RENAME TABLE " +
"idxWORD%02dR TO old_idxWORD%02dR," % (index_id, index_id) +
"%sidxWORD%02dR TO idxWORD%02dR," % (reindex_prefix, index_id, index_id) +
"idxWORD%02dF TO old_idxWORD%02dF," % (index_id, index_id) +
"%sidxWORD%02dF TO idxWORD%02dF," % (reindex_prefix, index_id, index_id) +
"idxPAIR%02dR TO old_idxPAIR%02dR," % (index_id, index_id) +
"%sidxPAIR%02dR TO idxPAIR%02dR," % (reindex_prefix, index_id, index_id) +
"idxPAIR%02dF TO old_idxPAIR%02dF," % (index_id, index_id) +
"%sidxPAIR%02dF TO idxPAIR%02dF," % (reindex_prefix, index_id, index_id) +
"idxPHRASE%02dR TO old_idxPHRASE%02dR," % (index_id, index_id) +
"%sidxPHRASE%02dR TO idxPHRASE%02dR," % (reindex_prefix, index_id, index_id) +
"idxPHRASE%02dF TO old_idxPHRASE%02dF," % (index_id, index_id) +
"%sidxPHRASE%02dF TO idxPHRASE%02dF;" % (reindex_prefix, index_id, index_id)
)
write_message("Dropping old index tables for id %s" % index_id)
run_sql_drop_silently("""DROP TABLE old_idxWORD%02dR,
old_idxWORD%02dF,
old_idxPAIR%02dR,
old_idxPAIR%02dF,
old_idxPHRASE%02dR,
old_idxPHRASE%02dF""" % ((index_id, )* 6)
) # kwalitee: disable=sql
def init_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
"""Create reindexing temporary tables."""
write_message("Creating new tmp index tables for id %s" % index_id)
query = """DROP TABLE IF EXISTS %sidxWORD%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxWORD%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(50) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxWORD%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxWORD%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPAIR%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPAIR%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(100) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPAIR%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPAIR%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPHRASE%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPHRASE%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term text default NULL,
hitlist longblob,
PRIMARY KEY (id),
KEY term (term(50))
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPHRASE%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPHRASE%02dR (
id_bibrec mediumint(9) unsigned NOT NULL default '0',
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
def remove_subfields(s):
"Removes subfields from string, e.g. 'foo $$c bar' becomes 'foo bar'."
return re_subfields.sub(' ', s)
def get_field_indexes(field):
"""Returns indexes names and ids corresponding to the given field"""
if recognize_marc_tag(field):
#field is actually a tag
return get_marc_tag_indexes(field, virtual=False)
else:
return get_nonmarc_tag_indexes(field, virtual=False)
get_field_indexes_memoised = Memoise(get_field_indexes)
def get_index_tokenizer(index_id):
"""Returns value of a tokenizer field from idxINDEX database table
@param index_id: id of the index
"""
query = """SELECT tokenizer FROM "idxINDEX" WHERE id=%s""" % index_id
out = None
try:
res = run_sql(query)
if res:
out = _TOKENIZERS[res[0][0]]
except DatabaseError:
write_message("Exception caught for SQL statement: %s; " + \
"column tokenizer might not exist" % query, sys.stderr)
except KeyError:
write_message("Exception caught: there is no such tokenizer")
out = None
return out
def detect_tokenizer_type(tokenizer):
"""
Checks what is the main type of the tokenizer.
For more information on tokenizer types take
a look at BibIndexTokenizer class.
@param tokenizer: instance of a tokenizer
"""
from invenio.modules.indexer.tokenizers.BibIndexStringTokenizer import BibIndexStringTokenizer
from invenio.modules.indexer.tokenizers.BibIndexRecJsonTokenizer import BibIndexRecJsonTokenizer
from invenio.modules.indexer.tokenizers.BibIndexMultiFieldTokenizer import BibIndexMultiFieldTokenizer
tokenizer_inheritance_tree = inspect.getmro(tokenizer.__class__)
if BibIndexStringTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['string']
if BibIndexMultiFieldTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['multifield']
if BibIndexRecJsonTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['recjson']
return CFG_BIBINDEX_TOKENIZER_TYPE['unknown']
def get_last_updated_all_indexes():
"""Returns last modification date for all defined indexes"""
query= """SELECT name, last_updated FROM "idxINDEX" """
res = run_sql(query)
return res
def split_ranges(parse_string):
"""Parse a string a return the list or ranges."""
recIDs = []
ranges = parse_string.split(",")
for arange in ranges:
tmp_recIDs = arange.split("-")
if len(tmp_recIDs) == 1:
recIDs.append([int(tmp_recIDs[0]), int(tmp_recIDs[0])])
else:
if int(tmp_recIDs[0]) > int(tmp_recIDs[1]): # sanity check
tmp = tmp_recIDs[0]
tmp_recIDs[0] = tmp_recIDs[1]
tmp_recIDs[1] = tmp
recIDs.append([int(tmp_recIDs[0]), int(tmp_recIDs[1])])
return recIDs
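# Illustrative example: split_ranges("1-5,10") returns [[1, 5], [10, 10]];
# reversed bounds such as "7-3" are swapped, giving [[3, 7]].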
def get_word_tables(tables):
""" Given a list of table names it return a list of tuples
(index_id, index_name, index_tags).
"""
wordTables = []
if tables:
for index in tables:
index_id = get_index_id_from_index_name(index)
if index_id:
wordTables.append((index_id, index, get_index_tags(index)))
else:
write_message("Error: There is no %s words table." % \
index, sys.stderr)
return wordTables
def get_date_range(var):
"Returns the two dates contained as a low,high tuple"
limits = var.split(",")
if len(limits) == 1:
low = get_datetime(limits[0])
return low, None
if len(limits) == 2:
low = get_datetime(limits[0])
high = get_datetime(limits[1])
return low, high
return None, None
def create_range_list(res):
"""Creates a range list from a recID select query result contained
in res. The result is expected to have ascending numerical order."""
if not res:
return []
row = res[0]
if not row:
return []
else:
range_list = [[row, row]]
for row in res[1:]:
row_id = row
if row_id == range_list[-1][1] + 1:
range_list[-1][1] = row_id
else:
range_list.append([row_id, row_id])
return range_list
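# Illustrative example: create_range_list([1, 2, 3, 7, 8]) returns
# [[1, 3], [7, 8]].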
def beautify_range_list(range_list):
"""Returns a non overlapping, maximal range list"""
ret_list = []
for new in range_list:
found = 0
for old in ret_list:
if new[0] <= old[0] <= new[1] + 1 or new[0] - 1 <= old[1] <= new[1]:
old[0] = min(old[0], new[0])
old[1] = max(old[1], new[1])
found = 1
break
if not found:
ret_list.append(new)
return ret_list
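# Illustrative example: beautify_range_list([[1, 5], [4, 10], [20, 30]])
# merges the overlapping ranges and returns [[1, 10], [20, 30]].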
def truncate_index_table(index_name):
"""Properly truncate the given index."""
index_id = get_index_id_from_index_name(index_name)
if index_id:
write_message('Truncating %s index table in order to reindex.' % \
index_name, verbose=2)
run_sql("""UPDATE "idxINDEX" SET last_updated='1900-01-01 00:00:00'
WHERE id=%s""", (index_id, ))
run_sql("TRUNCATE idxWORD%02dF" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxWORD%02dR" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxPHRASE%02dF" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxPHRASE%02dR" % index_id) # kwalitee: disable=sql
def update_index_last_updated(indexes, starting_time=None):
"""Update last_updated column of the index table in the database.
Puts starting time there so that if the task
was interrupted for record download,
the records will be reindexed next time.
@param indexes: list of indexes names
"""
if starting_time is None:
return None
for index_name in indexes:
write_message("updating last_updated to %s...for %s index" % \
(starting_time, index_name), verbose=9)
run_sql("""UPDATE "idxINDEX" SET last_updated=%s WHERE name=%s""",
(starting_time, index_name))
def get_percentage_completed(num_done, num_total):
""" Return a string containing the approx. percentage completed """
    percentage_completed = 100.0 * float(num_done) / float(num_total)
    if percentage_completed:
        percentage_display = "(%.1f%%)" % (percentage_completed, )
else:
percentage_display = ""
return percentage_display
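# Illustrative example: get_percentage_completed(50, 200) returns "(25.0%)",
# while get_percentage_completed(0, 200) returns "" because 0.0 is falsy.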
def _fill_dict_of_indexes_with_empty_sets():
"""find_affected_records internal function.
Creates dict: {'index_name1':set([]), ...}
"""
index_dict = {}
tmp_all_indexes = get_all_indexes(virtual=False)
for index in tmp_all_indexes:
index_dict[index] = set([])
return index_dict
def find_affected_records_for_index(indexes=None, recIDs=None, force_all_indexes=False):
"""
Function checks which records need to be changed/reindexed
for given index/indexes.
Makes use of hstRECORD table where
different revisions of record are kept.
If parameter force_all_indexes is set
function will assign all recIDs to all indexes.
    @param indexes: names of indexes for reindexation separated by comma
@param recIDs: recIDs for reindexation in form:
[[range1_down, range1_up],[range2_down, range2_up]..]
@param force_all_indexes: should we index all indexes?
"""
if indexes is None:
indexes = []
if recIDs is None:
recIDs = []
tmp_dates = dict(get_last_updated_all_indexes())
modification_dates = dict([(date, tmp_dates[date] or datetime(1000, 1, 1, 1, 1, 1))
for date in tmp_dates])
tmp_all_indexes = get_all_indexes(virtual=False)
indexes = remove_inexistent_indexes(indexes, leave_virtual=False)
if not indexes:
return {}
def _should_reindex_for_revision(index_name, revision_date):
try:
if modification_dates[index_name] < revision_date and \
index_name in indexes:
return True
return False
except KeyError:
return False
if force_all_indexes:
records_for_indexes = {}
all_recIDs = []
for recIDs_range in recIDs:
all_recIDs.extend(range(recIDs_range[0], recIDs_range[1]+1))
for index in indexes:
records_for_indexes[index] = all_recIDs
return records_for_indexes
min_last_updated = get_min_last_updated(indexes)[0][0] or \
datetime(1000, 1, 1, 1, 1, 1)
recIDs_info = []
for recIDs_range in recIDs:
# firstly, determine which records were updated since min_last_updated:
query = """SELECT id_bibrec,job_date,affected_fields FROM hstRECORD
WHERE id_bibrec BETWEEN %s AND %s AND
job_date > '%s'""" % \
(recIDs_range[0], recIDs_range[1], min_last_updated)
res = run_sql(query)
if res:
recIDs_info.extend(res)
# secondly, there may be newly inserted records which were
# uploaded with old timestamp (via 005), so let us detect
# those too, using their "real" modification_date:
res = run_sql("""SELECT bibrec.id,modification_date,''
FROM bibrec, hstRECORD
WHERE modification_date>%s
AND bibrec.id=id_bibrec
AND (SELECT COUNT(*) FROM hstRECORD WHERE id_bibrec=bibrec.id)=1""", (min_last_updated,))
if res:
recIDs_info.extend(res)
indexes_to_change = _fill_dict_of_indexes_with_empty_sets()
for recID_info in recIDs_info:
recID, revision, affected_fields = recID_info
affected_fields = affected_fields.split(",")
indexes_for_recID = set()
for field in affected_fields:
if field:
field_indexes = get_field_indexes_memoised(field) or []
indexes_names = set([idx[1] for idx in field_indexes])
indexes_for_recID |= indexes_names
else:
# record was inserted, all fields were changed,
# no specific affected fields
indexes_for_recID |= set(tmp_all_indexes)
indexes_for_recID_filtered = [ind for ind in indexes_for_recID if _should_reindex_for_revision(ind, revision)]
for index in indexes_for_recID_filtered:
indexes_to_change[index].add(recID)
indexes_to_change = dict((k, list(sorted(v))) for k, v in iteritems(indexes_to_change) if v)
return indexes_to_change
def chunk_generator(rng):
"""
Splits one range into several smaller ones
with respect to global chunksize variable.
@param rng: range of records
@type rng: list in the form: [1, 2000]
"""
global chunksize
current_low = rng[0]
current_high = rng[0]
if rng[0] == None or rng[1] == None:
raise StopIteration
if rng[1] - rng[0] + 1 <= chunksize:
yield rng
else:
while current_high - 1 < rng[1]:
current_high += chunksize
yield current_low, min(current_high - 1, rng[1])
current_low += chunksize
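# Illustrative example: with the default chunksize of 1000,
# chunk_generator([1, 2500]) yields (1, 1000), (1001, 2000) and (2001, 2500).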
class AbstractIndexTable(object):
"""
This class represents an index table in database.
An index consists of three different kinds of tables:
table which stores only words in db,
table which stores pairs of words and
table which stores whole phrases.
The class represents only one table. Another instance of
the class must be created in order to store different
type of terms.
This class is an abstract class. It contains methods
to connect to db and methods which facilitate
    inserting/modifying/removing terms from it. The class
    also contains methods which help manage the memory.
All specific methods for indexing can be found in corresponding
classes for virtual and regular indexes.
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
self.index_name = index_name
self.index_id = get_index_id_from_index_name(index_name)
self.table_type = table_type
self.wash_index_terms = wash_index_terms
self.table_name = wash_table_column_name(table_prefix + \
"idx" + \
table_type + \
("%02d" % self.index_id) + "F")
self.table_prefix = table_prefix
self.value = {} # cache
self.recIDs_in_mem = []
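    # Illustrative example: for index_id 1, table_type "WORD" and
    # table_prefix "tmp_", the forward table name becomes "tmp_idxWORD01F"
    # (assuming wash_table_column_name passes the name through unchanged).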
def put_into_db(self, mode="normal"):
"""Updates the current words table in the corresponding DB
idxFOO table. Mode 'normal' means normal execution,
           mode 'emergency' means reverting the words index to its old state.
"""
write_message("%s %s wordtable flush started" % \
(self.table_name, mode))
write_message('...updating %d words into %s started' % \
(len(self.value), self.table_name))
task_update_progress("(%s:%s) flushed %d/%d words" % \
(self.table_name, self.index_name, 0, len(self.value)))
self.recIDs_in_mem = beautify_range_list(self.recIDs_in_mem)
tab_name = self.table_name[:-1] + "R"
if mode == "normal":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='TEMPORARY' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='CURRENT'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
nb_words_total = len(self.value)
nb_words_report = int(nb_words_total / 10.0)
nb_words_done = 0
for word in self.value.keys():
self.put_word_into_db(word)
nb_words_done += 1
if nb_words_report != 0 and ((nb_words_done % nb_words_report) == 0):
write_message('......processed %d/%d words' % \
(nb_words_done, nb_words_total))
percentage_display = get_percentage_completed(nb_words_done, nb_words_total)
task_update_progress("(%s:%s) flushed %d/%d words %s" % \
(tab_name, self.index_name,
nb_words_done, nb_words_total,
percentage_display))
write_message('...updating %d words into %s ended' % \
(nb_words_total, tab_name))
write_message('...updating reverse table %s started' % tab_name)
if mode == "normal":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='CURRENT' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='FUTURE'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
query = """DELETE FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s AND type='TEMPORARY'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
write_message('End of updating wordTable into %s' % \
tab_name, verbose=9)
elif mode == "emergency":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='CURRENT' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='TEMPORARY'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
query = """DELETE FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s AND type='FUTURE'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
write_message('End of emergency flushing wordTable into %s' % \
tab_name, verbose=9)
write_message('...updating reverse table %s ended' % tab_name)
self.clean()
self.recIDs_in_mem = []
write_message("%s %s wordtable flush ended" % \
(self.table_name, mode))
task_update_progress("(%s:%s) flush ended" % \
(self.table_name, self.index_name))
def put_word_into_db(self, word):
"""Flush a single word to the database and delete it from memory"""
set = self.load_old_recIDs(word)
if set is not None: # merge the word recIDs found in memory:
hitlist_was_changed = self.merge_with_old_recIDs(word, set)
if not hitlist_was_changed:
# nothing to update:
write_message("......... unchanged hitlist for ``%s''" % \
word, verbose=9)
else:
# yes there were some new words:
write_message("......... updating hitlist for ``%s''" % \
word, verbose=9)
run_sql("UPDATE %s SET hitlist=%%s WHERE term=%%s" % wash_table_column_name(self.table_name), (set.fastdump(), word)) # kwalitee: disable=sql
else: # the word is new, will create new set:
write_message("......... inserting hitlist for ``%s''" % \
word, verbose=9)
set = intbitset(self.value[word].keys())
try:
run_sql("INSERT INTO %s (term, hitlist) VALUES (%%s, %%s)" % wash_table_column_name(self.table_name), (word, set.fastdump())) # kwalitee: disable=sql
            except Exception as e:
                ## We send this exception to the admin only when it is not
                ## already repairing the problem.
register_exception(prefix="Error when putting the term '%s' into db (hitlist=%s): %s\n" % (repr(word), set, e), alert_admin=(task_get_option('cmd') != 'repair'))
if not set: # never store empty words
run_sql("DELETE FROM %s WHERE term=%%s" % wash_table_column_name(self.table_name), (word,)) # kwalitee: disable=sql
def put(self, recID, word, sign):
"""Keeps track of changes done during indexing
and stores these changes in memory for further use.
Indexing process needs this information later while
filling in the database.
@param recID: recID of the record we want to update in memory
@param word: word we want to update
        @param sign: sign of the word, 1 means keep this word in database,
-1 remove word from database
"""
value = self.value
try:
if self.wash_index_terms:
word = wash_index_term(word, self.wash_index_terms)
if word in value:
# the word 'word' exist already: update sign
value[word][recID] = sign
else:
value[word] = {recID: sign}
except Exception as e:
write_message("Error: Cannot put word %s with sign %d for recID %s." % \
(word, sign, recID))
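    # Illustrative example: after put(10, "ellis", 1) and put(11, "ellis", -1)
    # the cache holds self.value == {"ellis": {10: 1, 11: -1}}, i.e. recID 10
    # is marked for addition to the hitlist and recID 11 for removal.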
def load_old_recIDs(self, word):
"""Load existing hitlist for the word from the database index files."""
query = "SELECT hitlist FROM %s WHERE term=%%s" % self.table_name
res = run_sql(query, (word, ))
if res:
return intbitset(res[0][0])
else:
return None
def merge_with_old_recIDs(self, word, set):
"""Merge the system numbers stored in memory
(hash of recIDs with value +1 or -1 according
to whether to add/delete them) with those stored
in the database index and received in set universe
of recIDs for the given word.
Return False in case no change was done to SET, return True in case SET
was changed.
"""
oldset = intbitset(set)
set.update_with_signs(self.value[word])
return set != oldset
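# Illustrative example: if the stored hitlist is intbitset([1, 2]) and the
# in-memory signs for this word are {2: -1, 3: 1}, update_with_signs() turns
# the set into intbitset([1, 3]), so the method returns True (the set changed).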
def clean(self):
"Cleans the cache."
self.value = {}
class VirtualIndexTable(AbstractIndexTable):
"""
There are two types of indexes: virtual and regular/normal.
Check WordTable class for more on normal indexes.
This class represents a single index table for virtual index
(see also: AbstractIndexTable).
A virtual index doesn't store its own terms;
it accumulates terms from other indexes.
A good example of a virtual index is the global index, which stores
terms from title, abstract, keyword, author and so on.
This class contains methods for indexing virtual indexes.
See also: run_update()
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
"""
Creates VirtualIndexTable instance.
@param index_name: name of the index we want to reindex
@param table_type: words, pairs or phrases
@param table_prefix: add "tmp_" if you want to
reindex to temporary table
"""
AbstractIndexTable.__init__(self, index_name,
table_type,
table_prefix,
wash_index_terms)
self.mode = "normal"
self.dependent_indexes = dict(get_virtual_index_building_blocks(self.index_id))
def set_reindex_mode(self):
"""
Sets reindex mode. VirtualIndexTable will
remove all its content from database and
use insert_index function to repopulate it.
"""
self.mode = "reindex"
def run_update(self, flush=10000):
"""
Function starts all updating processes for virtual index.
It will take all information about pending changes from database
from queue tables (idxWORD/PAIR/PHRASExxQ), process them
and trigger appropriate indexing functions.
@param flush: how many records we will put in one go
into database (at most);
see also: opt_flush in WordTable class
"""
global chunksize
if self.mode == "reindex":
self.clean_database()
for index_id, index_name in self.dependent_indexes.iteritems():
rng = get_records_range_for_index(index_id)
flush_count = 0
if not rng:
continue
write_message('Virtual index: %s is being reindexed for %s index' % \
(self.index_name, index_name))
chunks = chunk_generator(rng)
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
self.insert_index(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(list(chunk))
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
self.clean_queue_table(index_name)
else:
for index_id, index_name in self.dependent_indexes.iteritems():
query = """SELECT id_bibrec_low, id_bibrec_high, mode FROM %s
WHERE index_name=%%s
ORDER BY runtime ASC""" % \
(self.table_name[:-1] + "Q")
entries = self.remove_duplicates(run_sql(query, (index_name, )))
if entries:
write_message('Virtual index: %s is being updated for %s index' % \
(self.index_name, index_name))
for entry in entries:
operation = None
recID_low, recID_high, mode = entry
if mode == CFG_BIBINDEX_UPDATE_MODE["Update"]:
operation = self.update_index
elif mode == CFG_BIBINDEX_UPDATE_MODE["Remove"]:
operation = self.remove_index
elif mode == CFG_BIBINDEX_UPDATE_MODE["Insert"]:
operation = self.insert_index
flush_count = 0
chunks = chunk_generator([recID_low, recID_high])
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
operation(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(list(chunk))
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
self.clean_queue_table(index_name)
def retrieve_new_values_from_index(self, index_id, records_range):
"""
Retrieves new values from dependent index
for specific range of records.
@param index_id: id of the dependent index
@param records_range: the smallest and the biggest id
in the range: [id_low, id_high]
"""
tab_name = "idx" + self.table_type + ("%02d" % index_id) + "R"
query = """SELECT id_bibrec, termlist FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s""" % tab_name
new_regular_values = run_sql(query, (records_range[0], records_range[1]))
if new_regular_values:
zipped = zip(*new_regular_values)
new_regular_values = dict(zip(zipped[0], map(deserialize_via_marshal, zipped[1])))
else:
new_regular_values = dict()
return new_regular_values
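# For example, if run_sql returns ((12, blob12), (13, blob13)), the zip/dict
# dance above produces {12: deserialize_via_marshal(blob12),
# 13: deserialize_via_marshal(blob13)}, i.e. a recID -> list-of-terms mapping.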
def retrieve_old_values(self, records_range):
"""
Retrieves old values from database for this virtual index
for specific records range.
@param records_range: the smallest and the biggest id
in the range: [id_low, id_high]
"""
virtual_tab_name = self.table_name[:-1] + "R"
query = """SELECT id_bibrec, termlist FROM %s
WHERE type='CURRENT' AND
id_bibrec BETWEEN %%s AND %%s""" % virtual_tab_name
old_virtual_values = run_sql(query, (records_range[0], records_range[1]))
if old_virtual_values:
zipped = zip(*old_virtual_values)
old_virtual_values = dict(zip(zipped[0], map(deserialize_via_marshal, zipped[1])))
else:
old_virtual_values = dict()
return old_virtual_values
def update_index(self, index_id, recID_low, recID_high):
"""
Updates the state of virtual index for records in range:
recID_low, recID_high for index specified by index_id.
Function stores terms in idxWORD/PAIR/PHRASExxR tables with
prefixes for specific index, for example term 'ellis'
from author index will be stored in reversed table as:
'__author__ellis'. It allows fast operations on only a part of the terms.
@param index_id: id of the dependent index we want to update from
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
update_cache_for_record = self.update_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take new values
new_regular_values = self.retrieve_new_values_from_index(index_id, [recID_low, recID_high])
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
new_values = new_regular_values.get(recID) or []
old_values = old_virtual_values.get(recID) or []
to_serialize = update_cache_for_record(index_name, recID, old_values, new_values)
if len(to_serialize) == 0:
continue
run_sql("""INSERT INTO %s (id_bibrec,termlist,type)
VALUES (%%s,%%s,'FUTURE')""" % \
wash_table_column_name(virtual_tab_name),
(recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def insert_index(self, index_id, recID_low, recID_high):
"""
Inserts terms from a dependent index into the virtual table
without looking at what's inside the virtual table and
what terms are being added. It's faster than 'updating',
but it can only be used when the virtual table is free of
terms from this dependent index.
@param index_id: id of the dependent index we want to insert from
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
insert_to_cache_for_record = self.insert_to_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take new values
new_regular_values = self.retrieve_new_values_from_index(index_id, [recID_low, recID_high])
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
new_values = new_regular_values.get(recID) or []
old_values = old_virtual_values.get(recID) or []
to_serialize = insert_to_cache_for_record(index_name, recID, old_values, new_values)
if len(to_serialize) == 0:
continue
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def remove_index(self, index_id, recID_low, recID_high):
"""
Removes words found in dependent index from reversed
table of virtual index. Updates the state of the memory
(for future removal from forward table).
Takes into account that given words can be found in more
than one dependent index; such words won't be marked
for the removal process.
@param index_id: id of the dependent index we want to remove
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
remove_from_cache_for_record = self.remove_from_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
old_values = old_virtual_values.get(recID) or []
to_serialize = remove_from_cache_for_record(index_name, recID, old_values)
if len(to_serialize) == 0:
continue
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def update_cache_for_record(self, index_name, recID, old_values, new_values):
"""
Updates memory (cache) with information on what to
remove/add/modify in forward table for specified record.
It also returns new terms which should be indexed for given record.
@param index_name: index name of dependent index
@param recID: considered record
@param old_values: all old values from all dependent indexes
for this virtual index for recID
@param new_values: new values from some dependent index
which should be added
"""
prefix = make_prefix(index_name)
put = self.put
new_values_prefix = [prefix + term for term in new_values]
part_values = []
tmp_old_values_prefix = []
# split old values from v.index into those with 'prefix' and those without
for term in old_values:
if term.startswith(prefix):
term_without_prefix = re.sub(re_prefix, '', term)
part_values.append(term_without_prefix)
put(recID, term_without_prefix, -1)
else:
tmp_old_values_prefix.append(term)
# remember not to remove words that occur more than once
part_values = set(part_values)
for value in tmp_old_values_prefix:
term_without_prefix = re.sub(re_prefix, '', value)
if term_without_prefix in part_values:
put(recID, term_without_prefix, 1)
for term_without_prefix in new_values:
put(recID, term_without_prefix, 1)
tmp_new_values_prefix = list(tmp_old_values_prefix)
tmp_new_values_prefix.extend(new_values_prefix)
return tmp_new_values_prefix
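# Illustrative walk-through (assuming make_prefix('author') yields '__author__',
# as in the update_index docstring): with old_values = ['__author__ellis',
# '__title__higgs'] and new_values = ['smith'], the method puts ('ellis', -1)
# and ('smith', +1) into the cache and returns
# ['__title__higgs', '__author__smith'], i.e. the other indexes' terms plus
# the freshly prefixed new terms.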
def insert_to_cache_for_record(self, index_name, recID, old_values, new_values):
"""
Updates cache with terms which should be inserted to database.
Used in insert_index function. See also: update_cache_for_record
which is analogous for update_index function.
"""
prefix = make_prefix(index_name)
append = old_values.append
put = self.put
for term in new_values:
append(prefix + term)
put(recID, term, 1)
return old_values
def remove_from_cache_for_record(self, index_name, recID, old_values):
"""
Updates information in cache with terms which should be removed
from virtual table. Used in remove_index function.
"""
prefix = make_prefix(index_name)
tmp_rest = []
tmp_removed = []
tmp_new_values = []
append_to_new = tmp_new_values.append
append_to_rest = tmp_rest.append
append_to_removed = tmp_removed.append
put = self.put
for term in old_values:
if term.startswith(prefix):
term_without_prefix = re.sub(re_prefix, '', term)
append_to_removed(term_without_prefix)
put(recID, term_without_prefix, -1)
else:
append_to_rest(re.sub(re_prefix, '', term))
append_to_new(term)
to_remember = set(tmp_rest) & set(tmp_removed)
for term_without_prefix in to_remember:
put(recID, term_without_prefix, 1)
return tmp_new_values
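# Illustrative example: when removing the 'author' index, a term such as 'smith'
# that also appears under another prefix (e.g. '__title__smith') ends up in both
# tmp_removed and tmp_rest, so it is re-added with sign +1 and survives in the
# forward table; terms unique to 'author' keep their -1 sign and get removed.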
def clean_database(self):
"""Removes all entries from corresponding tables in database"""
query = """DELETE FROM %s""" % self.table_name
run_sql(query)
query = """DELETE FROM %s""" % self.table_name[:-1] + "R"
run_sql(query)
def clean_queue_table(self, index_name):
"""
Cleans queue table (i.e. idxWORD/PAIR/PHRASExxQ)
for a specific index. It means that the function will remove
all entries for this index from the queue table in the db.
"""
query = "DELETE FROM %s WHERE index_name='%s'" % \
(self.table_name[:-1].lstrip(self.table_prefix) + "Q",
index_name)
run_sql(query)
def remove_duplicates(self, entries):
"""
Removes duplicates from a list of entries (taken from Queue table)
in order to process a single command only once.
Queue table may look like this:
id (..) id_bibrec_low id_bibrec_high index_name mode
...
12 1 100 title update
13 1 100 title update
We don't want to perform the same operation twice, so we first
squash identical commands into one.
@param entries: list of entries taken from the database
"""
unique = set()
return [entry for entry in entries if entry not in unique and not unique.add(entry)]
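# The one-liner above relies on set.add() returning None: 'not unique.add(entry)'
# is always True, so its only effect is recording the entry; duplicates are
# skipped by the 'entry not in unique' check while the original order is kept.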
def remove_dependent_index(self, index_name):
"""
Removes dependent index from this virtual index.
It means removing all words from all records with prefix:
__index_name__ from reversed table, and removing some of
them from forward table if they don't appear in another
dependent index.
@param index_name: name of the dependent index to remove
"""
flush = 10000
dependent = self.dependent_indexes.values()
if len(dependent) == 0:
write_message("Specified index is not virtual...")
return
if index_name not in dependent:
write_message("Dependent index already removed...")
return
index_id = get_index_id_from_index_name(index_name)
records_range = get_records_range_for_index(index_id)
write_message("Removing an index: %s" % index_name)
if records_range:
flush_count = 0
chunks = chunk_generator([records_range[0], records_range[1]])
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
self.remove_index(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(chunk)
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
class WordTable(AbstractIndexTable):
"""
This class represents a single index table of a regular index
(regular means it doesn't accumulate data from other indexes,
but takes data directly from the metadata of the records which
are being indexed; for the other type of index see: VirtualIndexTable).
To start the indexing process one needs to invoke the add_recIDs() method.
For further reading see the description of this method.
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
"""Creates words table instance.
@param index_name: the index name
@param table_type: type of the wordtable: Words, Pairs, Phrases
@param table_prefix: prefix for table name, indexing will be performed
on table: <<table_prefix>>idx<<wordtable_type>>XXF
@param wash_index_terms: do we wash index terms, and if yes (when >0),
how many characters do we keep in the index terms; see
max_char_length parameter of wash_index_term()
"""
AbstractIndexTable.__init__(self, index_name, table_type, table_prefix, wash_index_terms)
self.tags = get_index_tags(index_name, virtual=False)
self.nonmarc_tags = get_index_tags(index_name,
virtual=False,
tagtype="nonmarc")
self.timestamp = datetime.now()
self.virtual_indexes = get_index_virtual_indexes(self.index_id)
self.virtual_index_update_mode = CFG_BIBINDEX_UPDATE_MODE["Update"]
try:
self.stemming_language = get_index_stemming_language(self.index_id)
except KeyError:
self.stemming_language = ''
self.remove_stopwords = get_index_remove_stopwords(self.index_id)
self.remove_html_markup = get_index_remove_html_markup(self.index_id)
self.remove_latex_markup = get_index_remove_latex_markup(self.index_id)
self.tokenizer = get_index_tokenizer(self.index_id)(self.stemming_language,
self.remove_stopwords,
self.remove_html_markup,
self.remove_latex_markup)
self.tokenizer_type = detect_tokenizer_type(self.tokenizer)
self.default_tokenizer_function = self.tokenizer.get_tokenizing_function(table_type)
self.special_tags = self._handle_special_tags()
if self.stemming_language and self.table_name.startswith('idxWORD'):
write_message('%s has stemming enabled, language %s' % (self.table_name, self.stemming_language))
def _handle_special_tags(self):
"""
Fills in a dict with special tags which
always use the same tokenizer; this
tokenizer is independent of the index.
"""
special_tags = {}
fields = self.tags + self.nonmarc_tags
for tag in fields:
if tag in CFG_BIBINDEX_SPECIAL_TAGS:
for t in CFG_BIBINDEX_INDEX_TABLE_TYPE:
if self.table_type == CFG_BIBINDEX_INDEX_TABLE_TYPE[t]:
tokenizer_name = CFG_BIBINDEX_SPECIAL_TAGS[tag][t]
tokenizer = _TOKENIZERS[tokenizer_name]
instance = tokenizer(self.stemming_language,
self.remove_stopwords,
self.remove_html_markup,
self.remove_latex_markup)
special_tags[tag] = instance.get_tokenizing_function(self.table_type)
break
return special_tags
def turn_off_virtual_indexes(self):
"""
Prevents related virtual indexes from being reindexed.
"""
self.virtual_indexes = []
def turn_on_virtual_indexes(self):
"""
Turns on indexing related virtual indexes.
"""
self.virtual_indexes = get_index_virtual_indexes(self.index_id)
def get_field(self, recID, tag):
"""Returns list of values of the MARC-21 'tag' fields for the
record 'recID'."""
out = []
bibXXx = "bib" + tag[0] + tag[1] + "x"
bibrec_bibXXx = "bibrec_" + bibXXx
query = """SELECT value FROM %s AS b, %s AS bb
WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id
AND tag LIKE %%s""" % (bibXXx, bibrec_bibXXx)
res = run_sql(query, (recID, tag))
for row in res:
out.append(row[0])
return out
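# Example: for tag '980__c' the query above joins bib98x with bibrec_bib98x,
# since bibXXx is built from the first two digits of the tag.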
def notify_virtual_indexes(self, recID_ranges):
"""
Informs all related virtual indexes about index change.
Function leaves information about the change for each related virtual index
in the proper queue table in the database (idxWORD/PAIR/PHRASExxQ).
@param recID_ranges: low and high recIDs of ranges
@type recID_ranges: list [[low_id1, high_id1], [low_id2, high_id2]...]
"""
query = """INSERT INTO %s (runtime, id_bibrec_low, id_bibrec_high, index_name, mode)
VALUES (%%s, %%s, %%s, %%s, %%s)"""
for index_id, index_name in self.virtual_indexes:
tab_name = "idx%s%02dQ" % (self.table_type, index_id)
full_query = query % tab_name
for recID_range in recID_ranges:
run_sql(full_query, (self.timestamp,
recID_range[0],
recID_range[1],
self.index_name,
self.virtual_index_update_mode))
def display(self):
"Displays the word table."
keys = self.value.keys()
keys.sort()
for k in keys:
write_message("%s: %s" % (k, self.value[k]))
def count(self):
"Returns the number of words in the table."
return len(self.value)
def info(self):
"Prints some information on the words table."
write_message("The words table contains %d words." % self.count())
def lookup_words(self, word=""):
"Lookup word from the words table."
if not word:
done = 0
while not done:
try:
word = raw_input("Enter word: ")
done = 1
except (EOFError, KeyboardInterrupt):
return
if word in self.value:
write_message("The word '%s' is found %d times." \
% (word, len(self.value[word])))
else:
write_message("The word '%s' does not exist in the word file."\
% word)
def add_recIDs(self, recIDs, opt_flush):
"""Fetches records which id in the recIDs range list and adds
them to the wordTable. The recIDs range list is of the form:
[[i1_low,i1_high],[i2_low,i2_high], ..., [iN_low,iN_high]].
"""
global chunksize, _last_word_table
flush_count = 0
records_done = 0
records_to_go = 0
for arange in recIDs:
records_to_go = records_to_go + arange[1] - arange[0] + 1
time_started = time.time() # will measure profile time
for arange in recIDs:
i_low = arange[0]
chunksize_count = 0
while i_low <= arange[1]:
task_sleep_now_if_required()
# calculate chunk group of recIDs and treat it:
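# (i_high is bounded twice: by how many records are still allowed before the
# next flush, and by the global chunksize for a single processing step)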
i_high = min(i_low + opt_flush - flush_count - 1, arange[1])
i_high = min(i_low + chunksize - chunksize_count - 1, i_high)
try:
self.chk_recID_range(i_low, i_high)
except StandardError:
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
raise
write_message(CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR % \
(self.table_name, i_low, i_high))
if CFG_CHECK_MYSQL_THREADS:
kill_sleepy_mysql_threads()
percentage_display = get_percentage_completed(records_done, records_to_go)
task_update_progress("(%s:%s) adding recs %d-%d %s" % (self.table_name, self.index_name, i_low, i_high, percentage_display))
self.del_recID_range(i_low, i_high)
just_processed = self.add_recID_range(i_low, i_high)
flush_count = flush_count + i_high - i_low + 1
chunksize_count = chunksize_count + i_high - i_low + 1
records_done = records_done + just_processed
write_message(CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR % \
(self.table_name, i_low, i_high))
if chunksize_count >= chunksize:
chunksize_count = 0
# flush if necessary:
if flush_count >= opt_flush:
self.put_into_db()
self.clean()
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
write_message("%s backing up" % (self.table_name))
flush_count = 0
self.log_progress(time_started, records_done, records_to_go)
# iterate:
i_low = i_high + 1
if flush_count > 0:
self.put_into_db()
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
self.log_progress(time_started, records_done, records_to_go)
self.notify_virtual_indexes(recIDs)
def add_recID_range(self, recID1, recID2):
"""Add records from RECID1 to RECID2."""
wlist = {}
self.recIDs_in_mem.append([recID1, recID2])
# special case of author indexes where we also add author
# canonical IDs:
if self.index_name in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor'):
for recID in range(recID1, recID2 + 1):
if recID not in wlist:
wlist[recID] = []
wlist[recID] = list_union(get_author_canonical_ids_for_recid(recID),
wlist[recID])
marc, nonmarc = self.find_nonmarc_records(recID1, recID2)
if marc and len(self.tags):
collector = TermCollector(self.tokenizer,
self.tokenizer_type,
self.table_type,
self.tags,
[recID1, recID2])
collector.set_special_tags(self.special_tags)
wlist = collector.collect(marc, wlist)
if nonmarc or (not len(self.tags) and len(self.nonmarc_tags)):
collector = NonmarcTermCollector(self.tokenizer,
self.tokenizer_type,
self.table_type,
self.nonmarc_tags,
[recID1, recID2])
collector.set_special_tags(self.special_tags)
toindex = nonmarc if len(self.tags) else marc
wlist = collector.collect(toindex, wlist)
# lookup index-time synonyms:
synonym_kbrs = get_all_synonym_knowledge_bases()
if self.index_name in synonym_kbrs:
if len(wlist) == 0: return 0
recIDs = wlist.keys()
for recID in recIDs:
for word in wlist[recID]:
word_synonyms = get_synonym_terms(word,
synonym_kbrs[self.index_name][0],
synonym_kbrs[self.index_name][1],
use_memoise=True)
if word_synonyms:
wlist[recID] = list_union(word_synonyms, wlist[recID])
# were there some words for these recIDs found?
recIDs = wlist.keys()
for recID in recIDs:
# was this record marked as deleted?
if "DELETED" in self.get_field(recID, "980__c"):
wlist[recID] = []
write_message("... record %d was declared deleted, removing its word list" % recID, verbose=9)
write_message("... record %d, termlist: %s" % (recID, wlist[recID]), verbose=9)
if len(wlist) == 0: return 0
# put words into reverse index table with FUTURE status:
for recID in recIDs:
run_sql("INSERT INTO %sR (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(self.table_name[:-1]), (recID, serialize_via_marshal(wlist[recID]))) # kwalitee: disable=sql
# ... and, for new records, enter the CURRENT status as empty:
try:
run_sql("INSERT INTO %sR (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(self.table_name[:-1]), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
# okay, it's an already existing record, no problem
pass
# put words into memory word list:
put = self.put
for recID in recIDs:
for w in wlist[recID]:
put(recID, w, 1)
return len(recIDs)
def find_nonmarc_records(self, recID1, recID2):
"""Divides recID range into two different tables,
first one contains only recIDs of the records that
are Marc type and the second one contains records
of nonMarc type"""
marc = range(recID1, recID2 + 1)
nonmarc = []
query = """SELECT id FROM %s WHERE master_format <> 'marc'
AND id BETWEEN %%s AND %%s""" % "bibrec"
res = run_sql(query, (recID1, recID2))
if res:
nonmarc = list(zip(*res)[0])
if len(nonmarc) == (recID2 - recID1 + 1):
nonmarc = xrange(recID1, recID2 + 1)
marc = []
else:
for recID in nonmarc:
marc.remove(recID)
else:
marc = xrange(recID1, recID2 + 1)
return [marc, nonmarc]
def log_progress(self, start, done, todo):
"""Calculate progress and store it.
start: start time,
done: records processed,
todo: total number of records"""
time_elapsed = time.time() - start
# consistency check
if time_elapsed == 0 or done > todo:
return
time_recs_per_min = done / (time_elapsed / 60.0)
write_message("%d records took %.1f seconds to complete.(%1.f recs/min)"\
% (done, time_elapsed, time_recs_per_min))
if time_recs_per_min:
write_message("Estimated runtime: %.1f minutes" % \
((todo - done) / time_recs_per_min))
def put(self, recID, word, sign):
"""Keeps track of changes done during indexing
and stores these changes in memory for further use.
Indexing process needs this information later while
filling in the database.
@param recID: recID of the record we want to update in memory
@param word: word we want to update
@param sign: sign of the word, 1 means keep this word in the database,
-1 means remove the word from the database
"""
value = self.value
try:
if self.wash_index_terms:
word = wash_index_term(word, self.wash_index_terms)
if word in self.value:
# the word 'word' exists already: update its sign
value[word][recID] = sign
else:
value[word] = {recID: sign}
except:
write_message("Error: Cannot put word %s with sign %d for recID %s." % (word, sign, recID))
def del_recIDs(self, recIDs):
"""Fetches records which id in the recIDs range list and adds
them to the wordTable. The recIDs range list is of the form:
[[i1_low,i1_high],[i2_low,i2_high], ..., [iN_low,iN_high]].
"""
count = 0
for arange in recIDs:
task_sleep_now_if_required()
self.del_recID_range(arange[0], arange[1])
count = count + arange[1] - arange[0]
self.virtual_index_update_mode = CFG_BIBINDEX_UPDATE_MODE["Remove"]
self.put_into_db()
self.notify_virtual_indexes(recIDs)
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
def del_recID_range(self, low, high):
"""Deletes records with 'recID' system number between low
and high from memory words index table."""
write_message("%s fetching existing words for records #%d-#%d started" % \
(self.table_name, low, high), verbose=3)
self.recIDs_in_mem.append([low, high])
query = """SELECT id_bibrec,termlist FROM %sR as bb WHERE bb.id_bibrec
BETWEEN %%s AND %%s""" % (self.table_name[:-1])
recID_rows = run_sql(query, (low, high))
for recID_row in recID_rows:
recID = recID_row[0]
wlist = deserialize_via_marshal(recID_row[1])
for word in wlist:
self.put(recID, word, -1)
write_message("%s fetching existing words for records #%d-#%d ended" % \
(self.table_name, low, high), verbose=3)
def check_bad_words(self):
"""
Finds bad words in reverse tables. Returns True in case of bad words.
"""
query = """SELECT 1 FROM %sR WHERE type IN ('TEMPORARY','FUTURE') LIMIT 1""" \
% (self.table_name[:-1],)
res = run_sql(query)
return bool(res)
def report_on_table_consistency(self):
"""Check reverse words index tables (e.g. idxWORD01R) for
interesting states such as 'TEMPORARY' state.
Prints small report (no of words, no of bad words).
"""
# find number of words:
query = """SELECT COUNT(1) FROM %s""" % (self.table_name)
res = run_sql(query, None, 1)
if res:
nb_words = res[0][0]
else:
nb_words = 0
# report stats:
write_message("%s contains %d words" % (self.table_name, nb_words))
# find possible bad states in reverse tables:
if self.check_bad_words():
write_message("EMERGENCY: %s needs to be repaired" %
(self.table_name, ))
else:
write_message("%s is in consistent state" % (self.table_name))
def repair(self, opt_flush):
"""Repair the whole table"""
# find possible bad states in reverse tables:
if not self.check_bad_words():
return
query = """SELECT id_bibrec FROM %sR WHERE type IN ('TEMPORARY','FUTURE')""" \
% (self.table_name[:-1])
res = intbitset(run_sql(query))
recIDs = create_range_list(list(res))
flush_count = 0
records_done = 0
records_to_go = 0
for arange in recIDs:
records_to_go = records_to_go + arange[1] - arange[0] + 1
time_started = time.time() # will measure profile time
for arange in recIDs:
i_low = arange[0]
chunksize_count = 0
while i_low <= arange[1]:
task_sleep_now_if_required()
# calculate chunk group of recIDs and treat it:
i_high = min(i_low + opt_flush - flush_count - 1, arange[1])
i_high = min(i_low + chunksize - chunksize_count - 1, i_high)
self.fix_recID_range(i_low, i_high)
flush_count = flush_count + i_high - i_low + 1
chunksize_count = chunksize_count + i_high - i_low + 1
records_done = records_done + i_high - i_low + 1
if chunksize_count >= chunksize:
chunksize_count = 0
# flush if necessary:
if flush_count >= opt_flush:
self.put_into_db("emergency")
self.clean()
flush_count = 0
self.log_progress(time_started, records_done, records_to_go)
# iterate:
i_low = i_high + 1
if flush_count > 0:
self.put_into_db("emergency")
self.log_progress(time_started, records_done, records_to_go)
write_message("%s inconsistencies repaired." % self.table_name)
def chk_recID_range(self, low, high):
"""Check if the reverse index table is in proper state"""
## check db
query = """SELECT 1 FROM %sR WHERE type IN ('TEMPORARY','FUTURE')
AND id_bibrec BETWEEN %%s AND %%s LIMIT 1""" % self.table_name[:-1]
res = run_sql(query, (low, high), 1)
if not res:
write_message("%s for %d-%d is in consistent state" % (self.table_name, low, high))
return # okay, words table is consistent
## inconsistency detected!
write_message("EMERGENCY: %s inconsistencies detected..." % self.table_name)
error_message = "Errors found. You should check consistency of the " \
"%s - %sR tables.\nRunning 'bibindex --repair' is " \
"recommended." % (self.table_name, self.table_name[:-1])
write_message("EMERGENCY: " + error_message, stream=sys.stderr)
raise StandardError(error_message)
def fix_recID_range(self, low, high):
"""Try to fix reverse index database consistency
(e.g. table idxWORD01R) in the low,high doc-id range.
Possible states for a recID follow:
CUR TMP FUT: very bad things have happened: warn!
CUR TMP : very bad things have happened: warn!
CUR FUT: delete FUT (crash before flushing)
CUR : database is ok
TMP FUT: add TMP to memory and del FUT from memory
flush (revert to old state)
TMP : very bad things have happened: warn!
FUT: very bad things have happened: warn!
"""
state = {}
query = "SELECT id_bibrec,type FROM %sR WHERE id_bibrec BETWEEN %%s AND %%s"\
% self.table_name[:-1]
res = run_sql(query, (low, high))
for row in res:
if row[0] not in state:
state[row[0]] = []
state[row[0]].append(row[1])
ok = 1 # will hold info on whether we will be able to repair
for recID in state.keys():
if not 'TEMPORARY' in state[recID]:
if 'FUTURE' in state[recID]:
if 'CURRENT' not in state[recID]:
write_message("EMERGENCY: Index record %d is in inconsistent state. Can't repair it." % recID)
ok = 0
else:
write_message("EMERGENCY: Inconsistency in index record %d detected" % recID)
query = """DELETE FROM %sR
WHERE id_bibrec=%%s""" % self.table_name[:-1]
run_sql(query, (recID,))
write_message("EMERGENCY: Inconsistency in record %d repaired." % recID)
else:
if 'FUTURE' in state[recID] and not 'CURRENT' in state[recID]:
self.recIDs_in_mem.append([recID, recID])
# Get the words file
query = """SELECT type,termlist FROM %sR
WHERE id_bibrec=%%s""" % self.table_name[:-1]
write_message(query, verbose=9)
res = run_sql(query, (recID,))
for row in res:
wlist = deserialize_via_marshal(row[1])
write_message("Words are %s " % wlist, verbose=9)
if row[0] == 'TEMPORARY':
sign = 1
else:
sign = -1
for word in wlist:
self.put(recID, word, sign)
else:
write_message("EMERGENCY: %s for %d is in inconsistent "
"state. Couldn't repair it." % (self.table_name,
recID), stream=sys.stderr)
ok = 0
if not ok:
error_message = "Unrepairable errors found. You should check " \
"consistency of the %s - %sR tables. Deleting affected " \
"TEMPORARY and FUTURE entries from these tables is " \
"recommended; see the BibIndex Admin Guide." % \
(self.table_name, self.table_name[:-1])
write_message("EMERGENCY: " + error_message, stream=sys.stderr)
raise StandardError(error_message)
def main():
"""Main that construct all the bibtask."""
task_init(authorization_action='runbibindex',
authorization_msg="BibIndex Task Submission",
description="""Examples:
\t%s -a -i 234-250,293,300-500 -u admin@localhost
\t%s -a -w author,fulltext -M 8192 -v3
\t%s -d -m +4d -A on --flush=10000\n""" % ((sys.argv[0],) * 3), help_specific_usage=""" Indexing options:
-a, --add\t\tadd or update words for selected records
-d, --del\t\tdelete words for selected records
-i, --id=low[-high]\t\tselect according to doc recID
-m, --modified=from[,to]\tselect according to modification date
-c, --collection=c1[,c2]\tselect according to collection
-R, --reindex\treindex the selected indexes from scratch
Repairing options:
-k, --check\t\tcheck consistency for all records in the table(s)
-r, --repair\t\ttry to repair all records in the table(s)
Specific options:
-w, --windex=w1[,w2]\tword/phrase indexes to consider (all)
-M, --maxmem=XXX\tmaximum memory usage in kB (no limit)
-f, --flush=NNN\t\tfull consistent table flush after NNN records (10000)
--force\t\tforce indexing of all records for provided indexes
 -Z, --remove-dependent-index=w name of a dependent index to remove from the virtual index
 -l, --all-virtual\t\tselect all virtual indexes; the same as: -w virtual_ind1, virtual_ind2, ...
""",
version=__revision__,
specific_params=("adi:m:c:w:krRM:f:oZ:l", [
"add",
"del",
"id=",
"modified=",
"collection=",
"windex=",
"check",
"repair",
"reindex",
"maxmem=",
"flush=",
"force",
"remove-dependent-index=",
"all-virtual"
]),
task_stop_helper_fnc=task_stop_table_close_fnc,
task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
task_run_fnc=task_run_core,
task_submit_check_options_fnc=task_submit_check_options)
def task_submit_check_options():
"""Check for options compatibility."""
if task_get_option("reindex"):
if task_get_option("cmd") != "add" or task_get_option('id') or task_get_option('collection'):
print("ERROR: You can use --reindex only when adding modified record.", file=sys.stderr)
return False
return True
def task_submit_elaborate_specific_parameter(key, value, opts, args):
""" Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ['-n', '--number']:
self.options['number'] = value
return True
return False
"""
if key in ("-a", "--add"):
task_set_option("cmd", "add")
if ("-x", "") in opts or ("--del", "") in opts:
raise StandardError("Can not have --add and --del at the same time!")
elif key in ("-k", "--check"):
task_set_option("cmd", "check")
elif key in ("-r", "--repair"):
task_set_option("cmd", "repair")
elif key in ("-d", "--del"):
task_set_option("cmd", "del")
elif key in ("-i", "--id"):
task_set_option('id', task_get_option('id') + split_ranges(value))
elif key in ("-m", "--modified"):
task_set_option("modified", get_date_range(value))
elif key in ("-c", "--collection"):
task_set_option("collection", value)
elif key in ("-R", "--reindex"):
task_set_option("reindex", True)
elif key in ("-w", "--windex"):
task_set_option("windex", value)
elif key in ("-M", "--maxmem"):
task_set_option("maxmem", int(value))
if task_get_option("maxmem") < base_process_size + 1000:
raise StandardError("Memory usage should be higher than %d kB" % \
(base_process_size + 1000))
elif key in ("-f", "--flush"):
task_set_option("flush", int(value))
elif key in ("-o", "--force"):
task_set_option("force", True)
elif key in ("-Z", "--remove-dependent-index",):
task_set_option("remove-dependent-index", value)
elif key in ("-l", "--all-virtual",):
task_set_option("all-virtual", True)
else:
return False
return True
def task_stop_table_close_fnc():
""" Close tables to STOP. """
global _last_word_table
if _last_word_table:
_last_word_table.put_into_db()
def get_recIDs_by_date_bibliographic(dates, index_name, force_all=False):
""" Finds records that were modified between DATES[0] and DATES[1]
for given index.
If DATES is not set, then finds records that were modified since
the last update of the index.
@param index_name: name of the index to consider
"""
index_id = get_index_id_from_index_name(index_name)
if not dates:
query = """SELECT last_updated FROM "idxINDEX" WHERE id=%s"""
res = run_sql(query, (index_id,))
if not res:
return set([])
if not res[0][0] or force_all:
dates = ("0000-00-00", None)
else:
dates = (res[0][0], None)
if dates[1] is None:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b WHERE b.modification_date >= %s""",
(dates[0],)))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date AND
modification_date >= %s
AND status<>'DELETED'""",
(dates[0],)))
elif dates[0] is None:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b WHERE b.modification_date <= %s""",
(dates[1],)))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date
AND modification_date <= %s
AND status<>'DELETED'""",
(dates[1],)))
else:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b
WHERE b.modification_date >= %s AND
b.modification_date <= %s""",
(dates[0], dates[1])))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date AND
modification_date >= %s AND
modification_date <= %s AND
status<>'DELETED'""",
(dates[0], dates[1],)))
return set(res)
def search_unit_in_bibrec(datetext1, datetext2, search_type='c'):
"""Return hitset of recIDs found that were either created or modified.
Search according to 'search_type' argument being 'c' or 'm' from datetext1
until datetext2, inclusive. Does not pay attention to pattern, collection,
anything. Useful to intersect later on with the 'real' query.
"""
from invenio.ext.sqlalchemy import db
from invenio.modules.records.models import Record
if datetext1 != datetext2:
datetext1 += '->' + datetext2
return intbitset(db.session.query(Record.id).filter(
*Record.filter_time_interval(datetext1, search_type)).all())
def get_recIDs_by_date_authority(dates, index_name, force_all=False):
""" Finds records that were modified between DATES[0] and DATES[1]
for given index.
If DATES is not set, then finds records that were modified since
the last update of the index.
Searches for bibliographic records connected to authority records
that have been changed.
"""
index_id = get_index_id_from_index_name(index_name)
index_tags = get_index_tags(index_name)
if not dates:
query = """SELECT last_updated FROM "idxINDEX" WHERE id=%s"""
res = run_sql(query, (index_id,))
if not res:
return set([])
if not res[0][0] or force_all:
dates = ("0000-00-00", None)
else:
dates = (res[0][0], None)
res = intbitset()
for tag in index_tags:
pattern = tag.replace('%', '*')
matches = fnmatch.filter(CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC.keys(), pattern)
if not len(matches):
continue
for tag_match in matches:
# get the type of authority record associated with this field
auth_type = CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC.get(tag_match)
# find updated authority records of this type
# dates[1] is ignored, needs dates[0] to find res
now = datetime.now()
auth_recIDs = search_pattern(p='980__a:' + auth_type) \
& search_unit_in_bibrec(str(dates[0]), str(now), search_type='m')
# now find dependent bibliographic records
for auth_recID in auth_recIDs:
# get the control number identifier(s) of this authority record
control_nos = get_control_nos_from_recID(auth_recID)
# there may be multiple control number entries! (the '035' field is repeatable!)
for control_no in control_nos:
# get the bibrec IDs that refer to AUTHORITY_ID in TAG
tag_0 = tag_match[:5] + '0' # possibly do the same for '4' subfields ?
fieldvalue = '"' + control_no + '"'
res |= search_pattern(p=tag_0 + ':' + fieldvalue)
return set(res)
def get_not_updated_recIDs(modified_dates, indexes, force_all=False):
"""Finds not updated recIDs in database for indexes.
@param modified_dates: between these dates we should look for modified records
@type modified_dates: [date_old, date_new]
@param indexes: list of indexes
@type indexes: list of strings
@param force_all: if True all records will be taken
"""
found_recIDs = set()
write_message(CFG_BIBINDEX_UPDATE_MESSAGE)
for index in indexes:
found_recIDs |= get_recIDs_by_date_bibliographic(modified_dates, index, force_all)
found_recIDs |= get_recIDs_by_date_authority(modified_dates, index, force_all)
return list(sorted(found_recIDs))
def get_recIDs_from_cli(indexes=[]):
"""
Gets recID ranges from the CLI for indexing when the
user specified the 'id' or 'collection' option, or
searches for modified recIDs for the provided indexes
when recIDs are not specified.
@param indexes: it's a list of specified indexes, which
can be obtained from CLI with use of:
get_indexes_from_cli() function.
@type indexes: list of strings
"""
# need to first update idxINDEX table to find proper recIDs for reindexing
if task_get_option("reindex"):
for index_name in indexes:
run_sql("""UPDATE "idxINDEX" SET last_updated='1900-01-01 00:00:00'
WHERE name=%s""", (index_name,))
if task_get_option("id"):
return task_get_option("id")
elif task_get_option("collection"):
l_of_colls = task_get_option("collection").split(",")
recIDs = perform_request_search(c=l_of_colls)
recIDs_range = []
for recID in recIDs:
recIDs_range.append([recID, recID])
return recIDs_range
elif task_get_option("cmd") == "add":
recs = get_not_updated_recIDs(task_get_option("modified"),
indexes,
task_get_option("force"))
recIDs_range = beautify_range_list(create_range_list(recs))
return recIDs_range
return []
def get_indexes_from_cli():
"""
Gets indexes from CLI and checks if they are
valid. If indexes weren't specified, the function
will return all known indexes.
"""
indexes = task_get_option("windex")
all_virtual = task_get_option("all-virtual")
if all_virtual:
indexes = filter_for_virtual_indexes(get_all_indexes())
elif not indexes:
indexes = get_all_indexes()
else:
indexes = indexes.split(",")
indexes = remove_inexistent_indexes(indexes, leave_virtual=True)
return indexes
def remove_dependent_index(virtual_indexes, dependent_index):
"""
Removes dependent index from virtual indexes.
@param virtual_indexes: names of virtual_indexes
@type virtual_indexes: list of strings
@param dependent_index: name of dependent index
@type dependent_index: string
"""
if not virtual_indexes:
write_message("You should specify a name of a virtual index...")
return
id_dependent = get_index_id_from_index_name(dependent_index)
for index_name in virtual_indexes:
index_id = get_index_id_from_index_name(index_name)
for type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.itervalues():
vit = VirtualIndexTable(index_name, type_)
vit.remove_dependent_index(dependent_index)
task_sleep_now_if_required()
query = """DELETE FROM "idxINDEX_idxINDEX" WHERE id_virtual=%s AND id_normal=%s"""
run_sql(query, (index_id, id_dependent))
def should_update_virtual_indexes():
"""
Decides if any virtual indexes should be updated.
Decision is made based on arguments obtained
from CLI.
"""
return task_get_option("all-virtual") or task_get_option("windex")
def update_virtual_indexes(virtual_indexes, reindex=False):
"""
Function will update all specified virtual_indexes.
@param virtual_indexes: list of index names
@param reindex: shall we reindex given v.indexes from scratch?
"""
kwargs = {}
if reindex:
kwargs.update({'table_prefix': 'tmp_'})
for index_name in virtual_indexes:
if reindex:
index_id = get_index_id_from_index_name(index_name)
init_temporary_reindex_tables(index_id)
for key, type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.iteritems():
kwargs.update({'wash_index_terms': CFG_BIBINDEX_WASH_INDEX_TERMS[key]})
vit = VirtualIndexTable(index_name, type_, **kwargs)
vit.set_reindex_mode()
vit.run_update()
swap_temporary_reindex_tables(index_id)
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
else:
for key, type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.iteritems():
kwargs.update({'wash_index_terms': CFG_BIBINDEX_WASH_INDEX_TERMS[key]})
vit = VirtualIndexTable(index_name, type_, **kwargs)
vit.run_update()
task_sleep_now_if_required(can_stop_too=True)
def task_run_core():
"""Runs the task by fetching arguments from the BibSched task queue.
This is what BibSched will be invoking via daemon call.
"""
global _last_word_table
indexes = get_indexes_from_cli()
if len(indexes) == 0:
write_message("Specified indexes can't be found.")
return True
virtual_indexes = filter_for_virtual_indexes(indexes)
regular_indexes = list(set(indexes) - set(virtual_indexes))
# check tables consistency
if task_get_option("cmd") == "check":
for index_name in indexes:
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
wash_index_terms=50)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Pairs"],
wash_index_terms=100)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Phrases"],
wash_index_terms=0)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
_last_word_table = None
return True
# virtual index: remove dependent index
if task_get_option("remove-dependent-index"):
remove_dependent_index(indexes,
task_get_option("remove-dependent-index"))
return True
# virtual index: update
if should_update_virtual_indexes():
update_virtual_indexes(virtual_indexes, task_get_option("reindex"))
if len(regular_indexes) == 0:
return True
# regular index: initialization for Words,Pairs,Phrases
recIDs_range = get_recIDs_from_cli(regular_indexes)
recIDs_for_index = find_affected_records_for_index(regular_indexes,
recIDs_range,
(task_get_option("force") or \
task_get_option("reindex") or \
task_get_option("cmd") == "del"))
if len(recIDs_for_index.keys()) == 0:
write_message("Selected indexes/recIDs are up to date.")
# Let's work on single words!
for index_name in recIDs_for_index.keys():
index_id = get_index_id_from_index_name(index_name)
reindex_prefix = ""
if task_get_option("reindex"):
reindex_prefix = "tmp_"
init_temporary_reindex_tables(index_id, reindex_prefix)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
table_prefix=reindex_prefix,
wash_index_terms=50)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError as e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception(alert_admin=True)
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
# Let's work on pairs now
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Pairs"],
table_prefix=reindex_prefix,
wash_index_terms=100)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError as e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception()
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
# Let's work on phrases now
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Phrases"],
table_prefix=reindex_prefix,
wash_index_terms=0)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
if not task_get_option("id") and not task_get_option("collection"):
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError as e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception()
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
if task_get_option("reindex"):
swap_temporary_reindex_tables(index_id, reindex_prefix)
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
# update modification date also for indexes that were up to date
if not task_get_option("id") and not task_get_option("collection") and \
task_get_option("cmd") == "add":
up_to_date = set(indexes) - set(recIDs_for_index.keys())
update_index_last_updated(list(up_to_date), task_get_task_param('task_starting_time'))
_last_word_table = None
return True
### okay, here we go:
if __name__ == '__main__':
main()
| 1 | 16,277 | can you check if you can use `sqlalchemy.exc.IntegrityError` instead? | inveniosoftware-invenio | py |
@@ -144,7 +144,7 @@ void JobManager::scheduleThread() {
// @return: true if all task dispatched, else false
bool JobManager::runJobInternal(const JobDescription& jobDesc, JbOp op) {
- std::lock_guard<std::mutex> lk(muJobFinished_);
+ std::lock_guard<std::recursive_mutex> lk(muJobFinished_);
std::unique_ptr<JobExecutor> je =
JobExecutorFactory::createJobExecutor(jobDesc, kvStore_, adminClient_);
JobExecutor* jobExec = je.get(); | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "meta/processors/job/JobManager.h"
#include <folly/synchronization/Baton.h>
#include <gtest/gtest.h>
#include <thrift/lib/cpp/util/EnumUtils.h>
#include <boost/stacktrace.hpp>
#include "common/http/HttpClient.h"
#include "common/stats/StatsManager.h"
#include "common/time/WallClock.h"
#include "common/utils/MetaKeyUtils.h"
#include "interface/gen-cpp2/common_types.h"
#include "kvstore/Common.h"
#include "kvstore/KVIterator.h"
#include "meta/common/MetaCommon.h"
#include "meta/processors/Common.h"
#include "meta/processors/admin/AdminClient.h"
#include "meta/processors/job/BalancePlan.h"
#include "meta/processors/job/JobStatus.h"
#include "meta/processors/job/JobUtils.h"
#include "meta/processors/job/TaskDescription.h"
#include "webservice/Common.h"
DEFINE_int32(job_check_intervals, 5000, "job intervals in us");
DEFINE_double(job_expired_secs, 7 * 24 * 60 * 60, "job expired intervals in sec");
using nebula::kvstore::KVIterator;
namespace nebula {
namespace meta {
stats::CounterId kNumRunningJobs;
JobManager* JobManager::getInstance() {
static JobManager inst;
return &inst;
}
bool JobManager::init(nebula::kvstore::KVStore* store) {
if (store == nullptr) {
return false;
}
if (status_.load(std::memory_order_acquire) != JbmgrStatus::NOT_START) {
return false;
}
kvStore_ = store;
lowPriorityQueue_ = std::make_unique<folly::UMPSCQueue<std::pair<JbOp, JobID>, true>>();
highPriorityQueue_ = std::make_unique<folly::UMPSCQueue<std::pair<JbOp, JobID>, true>>();
status_.store(JbmgrStatus::IDLE, std::memory_order_release);
if (handleRemainingJobs() != nebula::cpp2::ErrorCode::SUCCEEDED) {
return false;
}
bgThread_ = std::thread(&JobManager::scheduleThread, this);
LOG(INFO) << "JobManager initialized";
return true;
}
JobManager::~JobManager() {
shutDown();
}
nebula::cpp2::ErrorCode JobManager::handleRemainingJobs() {
std::unique_ptr<kvstore::KVIterator> iter;
auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter);
if (retCode == nebula::cpp2::ErrorCode::E_LEADER_CHANGED) {
LOG(INFO) << "Not leader, skip reading remaining jobs";
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "Can't find jobs, error: " << apache::thrift::util::enumNameSafe(retCode);
return retCode;
}
std::vector<JobDescription> jds;
for (; iter->valid(); iter->next()) {
if (!JobDescription::isJobKey(iter->key())) {
continue;
}
auto optJobRet = JobDescription::makeJobDescription(iter->key(), iter->val());
if (nebula::ok(optJobRet)) {
auto optJob = nebula::value(optJobRet);
std::unique_ptr<JobExecutor> je =
JobExecutorFactory::createJobExecutor(optJob, kvStore_, adminClient_);
// Only meta jobs (e.g. balance) that were running are recovered
if (optJob.getStatus() == cpp2::JobStatus::RUNNING && je->isMetaJob()) {
jds.emplace_back(optJob);
}
}
}
for (auto& jd : jds) {
jd.setStatus(cpp2::JobStatus::QUEUE, true);
save(jd.jobKey(), jd.jobVal());
}
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
void JobManager::shutDown() {
LOG(INFO) << "JobManager::shutDown() begin";
if (status_.load(std::memory_order_acquire) ==
JbmgrStatus::STOPPED) { // in case of shutdown more than once
LOG(INFO) << "JobManager not running, exit";
return;
}
status_.store(JbmgrStatus::STOPPED, std::memory_order_release);
bgThread_.join();
LOG(INFO) << "JobManager::shutDown() end";
}
void JobManager::scheduleThread() {
LOG(INFO) << "JobManager::runJobBackground() enter";
while (status_.load(std::memory_order_acquire) != JbmgrStatus::STOPPED) {
std::pair<JbOp, JobID> opJobId;
while (status_.load(std::memory_order_acquire) == JbmgrStatus::BUSY || !try_dequeue(opJobId)) {
if (status_.load(std::memory_order_acquire) == JbmgrStatus::STOPPED) {
LOG(INFO) << "[JobManager] detect shutdown called, exit";
break;
}
usleep(FLAGS_job_check_intervals);
}
auto jobDescRet = JobDescription::loadJobDescription(opJobId.second, kvStore_);
if (!nebula::ok(jobDescRet)) {
LOG(ERROR) << "[JobManager] load an invalid job from queue " << opJobId.second;
continue; // leader change or archive happened
}
auto jobDesc = nebula::value(jobDescRet);
if (!jobDesc.setStatus(cpp2::JobStatus::RUNNING, opJobId.first == JbOp::RECOVER)) {
LOG(INFO) << "[JobManager] skip job " << opJobId.second;
continue;
}
save(jobDesc.jobKey(), jobDesc.jobVal());
compareChangeStatus(JbmgrStatus::IDLE, JbmgrStatus::BUSY);
if (!runJobInternal(jobDesc, opJobId.first)) {
jobFinished(opJobId.second, cpp2::JobStatus::FAILED);
}
}
}
// @return: true if all tasks were dispatched, else false
bool JobManager::runJobInternal(const JobDescription& jobDesc, JbOp op) {
std::lock_guard<std::mutex> lk(muJobFinished_);
std::unique_ptr<JobExecutor> je =
JobExecutorFactory::createJobExecutor(jobDesc, kvStore_, adminClient_);
JobExecutor* jobExec = je.get();
runningJobs_.emplace(jobDesc.getJobId(), std::move(je));
if (jobExec == nullptr) {
LOG(ERROR) << "unreconized job cmd " << apache::thrift::util::enumNameSafe(jobDesc.getCmd());
return false;
}
if (jobDesc.getStatus() == cpp2::JobStatus::STOPPED) {
jobExec->stop();
return true;
}
if (!jobExec->check()) {
LOG(ERROR) << "Job Executor check failed";
return false;
}
if (jobExec->prepare() != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "Job Executor prepare failed";
return false;
}
if (op == JbOp::RECOVER) {
jobExec->recovery();
}
if (jobExec->isMetaJob()) {
jobExec->setFinishCallBack([this, jobDesc](meta::cpp2::JobStatus status) {
if (status == meta::cpp2::JobStatus::STOPPED) {
std::lock_guard<std::mutex> lkg(muJobFinished_);
cleanJob(jobDesc.getJobId());
return nebula::cpp2::ErrorCode::SUCCEEDED;
} else {
return jobFinished(jobDesc.getJobId(), status);
}
});
}
if (jobExec->execute() != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "Job dispatch failed";
return false;
}
return true;
}
void JobManager::cleanJob(JobID jobId) {
  // Delete the job after it has finished or failed
LOG(INFO) << "[task] cleanJob " << jobId;
auto it = inFlightJobs_.find(jobId);
if (it != inFlightJobs_.end()) {
inFlightJobs_.erase(it);
}
auto itr = runningJobs_.find(jobId);
if (itr != runningJobs_.end()) {
runningJobs_.erase(itr);
}
}
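// Persists the terminal status of a job and notifies its executor (stop or finish).
// Guarded by muJobFinished_ because a normal finish can race with an explicit stop.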
nebula::cpp2::ErrorCode JobManager::jobFinished(JobID jobId, cpp2::JobStatus jobStatus) {
LOG(INFO) << folly::sformat(
"{}, jobId={}, result={}", __func__, jobId, apache::thrift::util::enumNameSafe(jobStatus));
  // a normal job finish may race with a job stop
std::lock_guard<std::mutex> lk(muJobFinished_);
auto optJobDescRet = JobDescription::loadJobDescription(jobId, kvStore_);
if (!nebula::ok(optJobDescRet)) {
LOG(WARNING) << folly::sformat("can't load job, jobId={}", jobId);
if (jobStatus != cpp2::JobStatus::STOPPED) {
      // There is a rare condition where the job description has already been
      // deleted (by default after more than a week) by the time the job finishes,
      // but stopping an invalid job should not set the status back to idle.
compareChangeStatus(JbmgrStatus::BUSY, JbmgrStatus::IDLE);
}
return nebula::error(optJobDescRet);
}
auto optJobDesc = nebula::value(optJobDescRet);
if (!optJobDesc.setStatus(jobStatus)) {
    // the job has already been set as finished, failed or stopped
return nebula::cpp2::ErrorCode::E_SAVE_JOB_FAILURE;
}
compareChangeStatus(JbmgrStatus::BUSY, JbmgrStatus::IDLE);
auto rc = save(optJobDesc.jobKey(), optJobDesc.jobVal());
if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) {
return rc;
}
auto it = runningJobs_.find(jobId);
if (it == runningJobs_.end()) {
LOG(WARNING) << folly::sformat("can't find jobExecutor, jobId={}", jobId);
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
std::unique_ptr<JobExecutor>& jobExec = it->second;
if (!optJobDesc.getParas().empty()) {
auto spaceName = optJobDesc.getParas().back();
auto spaceIdRet = getSpaceId(spaceName);
if (!nebula::ok(spaceIdRet)) {
auto retCode = nebula::error(spaceIdRet);
LOG(INFO) << "Get spaceName " << spaceName
<< " failed, error: " << apache::thrift::util::enumNameSafe(retCode);
return retCode;
}
auto spaceId = nebula::value(spaceIdRet);
if (spaceId == -1) {
return nebula::cpp2::ErrorCode::E_STORE_FAILURE;
}
jobExec->setSpaceId(spaceId);
}
if (jobStatus == cpp2::JobStatus::STOPPED) {
jobExec->stop();
if (!jobExec->isMetaJob()) {
cleanJob(jobId);
}
} else {
jobExec->finish(jobStatus == cpp2::JobStatus::FINISHED);
cleanJob(jobId);
}
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
nebula::cpp2::ErrorCode JobManager::saveTaskStatus(TaskDescription& td,
const cpp2::ReportTaskReq& req) {
auto code = req.get_code();
auto status = code == nebula::cpp2::ErrorCode::SUCCEEDED ? cpp2::JobStatus::FINISHED
: cpp2::JobStatus::FAILED;
td.setStatus(status);
auto jobId = req.get_job_id();
auto optJobDescRet = JobDescription::loadJobDescription(jobId, kvStore_);
if (!nebula::ok(optJobDescRet)) {
auto retCode = nebula::error(optJobDescRet);
LOG(WARNING) << "LoadJobDesc failed, jobId " << jobId
<< " error: " << apache::thrift::util::enumNameSafe(retCode);
return retCode;
}
auto optJobDesc = nebula::value(optJobDescRet);
auto jobExec = JobExecutorFactory::createJobExecutor(optJobDesc, kvStore_, adminClient_);
if (!jobExec) {
LOG(WARNING) << folly::sformat("createMetaJobExecutor failed(), jobId={}", jobId);
return nebula::cpp2::ErrorCode::E_TASK_REPORT_OUT_DATE;
}
auto rcSave = save(td.taskKey(), td.taskVal());
if (rcSave != nebula::cpp2::ErrorCode::SUCCEEDED) {
return rcSave;
}
if (!optJobDesc.getParas().empty()) {
GraphSpaceID spaceId = -1;
auto spaceName = optJobDesc.getParas().back();
auto spaceIdRet = getSpaceId(spaceName);
if (!nebula::ok(spaceIdRet)) {
auto retCode = nebula::error(spaceIdRet);
LOG(WARNING) << "Get spaceName " << spaceName
<< " failed, error: " << apache::thrift::util::enumNameSafe(retCode);
} else {
spaceId = nebula::value(spaceIdRet);
jobExec->setSpaceId(spaceId);
}
}
return jobExec->saveSpecialTaskStatus(req);
}
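// Best-effort CAS on the scheduler state: swaps `expected` for `desired` and
// deliberately ignores the result if another thread has changed the status first.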
void JobManager::compareChangeStatus(JbmgrStatus expected, JbmgrStatus desired) {
JbmgrStatus ex = expected;
status_.compare_exchange_strong(ex, desired, std::memory_order_acq_rel);
}
/**
 * @brief
 * The client should retry if any persist attempt fails,
 * for example on leader change / store failure.
 * Otherwise, errors may be logged and then ignored.
 * @return cpp2::ErrorCode
 */
nebula::cpp2::ErrorCode JobManager::reportTaskFinish(const cpp2::ReportTaskReq& req) {
auto jobId = req.get_job_id();
auto taskId = req.get_task_id();
  // only an active job manager will accept a task finish report
if (status_.load(std::memory_order_acquire) == JbmgrStatus::STOPPED ||
status_.load(std::memory_order_acquire) == JbmgrStatus::NOT_START) {
LOG(INFO) << folly::sformat(
"report to an in-active job manager, job={}, task={}", jobId, taskId);
return nebula::cpp2::ErrorCode::E_UNKNOWN;
}
  // Because the last task will update the job's status,
  // tasks should report one at a time.
std::lock_guard<std::mutex> lk(muReportFinish_);
auto tasksRet = getAllTasks(jobId);
if (!nebula::ok(tasksRet)) {
return nebula::error(tasksRet);
}
auto tasks = nebula::value(tasksRet);
auto task = std::find_if(tasks.begin(), tasks.end(), [&](auto& it) {
return it.getJobId() == jobId && it.getTaskId() == taskId;
});
if (task == tasks.end()) {
LOG(WARNING) << folly::sformat(
"report an invalid or outdate task, will ignore this report, job={}, "
"task={}",
jobId,
taskId);
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
auto rc = saveTaskStatus(*task, req);
if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) {
return rc;
}
auto allTaskFinished = std::none_of(tasks.begin(), tasks.end(), [](auto& tsk) {
return tsk.status_ == cpp2::JobStatus::RUNNING;
});
if (allTaskFinished) {
auto jobStatus = std::all_of(tasks.begin(),
tasks.end(),
[](auto& tsk) { return tsk.status_ == cpp2::JobStatus::FINISHED; })
? cpp2::JobStatus::FINISHED
: cpp2::JobStatus::FAILED;
return jobFinished(jobId, jobStatus);
}
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
ErrorOr<nebula::cpp2::ErrorCode, std::list<TaskDescription>> JobManager::getAllTasks(JobID jobId) {
std::list<TaskDescription> taskDescriptions;
auto jobKey = JobDescription::makeJobKey(jobId);
std::unique_ptr<kvstore::KVIterator> iter;
auto rc = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, jobKey, &iter);
if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) {
return rc;
}
for (; iter->valid(); iter->next()) {
if (JobDescription::isJobKey(iter->key())) {
continue;
}
taskDescriptions.emplace_back(TaskDescription(iter->key(), iter->val()));
}
return taskDescriptions;
}
nebula::cpp2::ErrorCode JobManager::addJob(const JobDescription& jobDesc, AdminClient* client) {
auto rc = save(jobDesc.jobKey(), jobDesc.jobVal());
if (rc == nebula::cpp2::ErrorCode::SUCCEEDED) {
auto jobId = jobDesc.getJobId();
enqueue(JbOp::ADD, jobId, jobDesc.getCmd());
// Add job to jobMap
inFlightJobs_.emplace(jobId, jobDesc);
} else {
LOG(ERROR) << "Add Job Failed";
if (rc != nebula::cpp2::ErrorCode::E_LEADER_CHANGED) {
rc = nebula::cpp2::ErrorCode::E_ADD_JOB_FAILURE;
}
return rc;
}
adminClient_ = client;
return nebula::cpp2::ErrorCode::SUCCEEDED;
}
size_t JobManager::jobSize() const {
return highPriorityQueue_->size() + lowPriorityQueue_->size();
}
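// Dequeues the next job, always draining the high-priority (STATS) queue before
// falling back to the low-priority queue.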
bool JobManager::try_dequeue(std::pair<JbOp, JobID>& opJobId) {
if (highPriorityQueue_->try_dequeue(opJobId)) {
return true;
} else if (lowPriorityQueue_->try_dequeue(opJobId)) {
return true;
}
return false;
}
void JobManager::enqueue(const JbOp& op, const JobID& jobId, const cpp2::AdminCmd& cmd) {
if (cmd == cpp2::AdminCmd::STATS) {
highPriorityQueue_->enqueue(std::make_pair(op, jobId));
} else {
lowPriorityQueue_->enqueue(std::make_pair(op, jobId));
}
}
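// Lists all jobs of the given space; expired jobs (and their task keys) found
// while scanning are collected and removed before the result is returned.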
ErrorOr<nebula::cpp2::ErrorCode, std::vector<cpp2::JobDesc>> JobManager::showJobs(
const std::string& spaceName) {
std::unique_ptr<kvstore::KVIterator> iter;
auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter);
if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "Fetch Jobs Failed, error: " << apache::thrift::util::enumNameSafe(retCode);
return retCode;
}
int32_t lastExpiredJobId = INT_MIN;
std::vector<std::string> expiredJobKeys;
std::vector<cpp2::JobDesc> ret;
for (; iter->valid(); iter->next()) {
auto jobKey = iter->key();
if (JobDescription::isJobKey(jobKey)) {
auto optJobRet = JobDescription::makeJobDescription(jobKey, iter->val());
if (!nebula::ok(optJobRet)) {
expiredJobKeys.emplace_back(jobKey);
continue;
}
auto optJob = nebula::value(optJobRet);
      // skip expired jobs (expiry defaults to 1 week)
auto jobDesc = optJob.toJobDesc();
if (isExpiredJob(jobDesc)) {
lastExpiredJobId = jobDesc.get_id();
LOG(INFO) << "remove expired job " << lastExpiredJobId;
expiredJobKeys.emplace_back(jobKey);
continue;
}
if (jobDesc.get_paras().back() != spaceName) {
continue;
}
ret.emplace_back(jobDesc);
    } else {  // iter->key() is a TaskKey
TaskDescription task(jobKey, iter->val());
if (task.getJobId() == lastExpiredJobId) {
expiredJobKeys.emplace_back(jobKey);
}
}
}
retCode = removeExpiredJobs(std::move(expiredJobKeys));
if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "Remove Expired Jobs Failed";
return retCode;
}
std::sort(
ret.begin(), ret.end(), [](const auto& a, const auto& b) { return a.get_id() > b.get_id(); });
return ret;
}
bool JobManager::isExpiredJob(const cpp2::JobDesc& jobDesc) {
if (*jobDesc.status_ref() == cpp2::JobStatus::QUEUE ||
*jobDesc.status_ref() == cpp2::JobStatus::RUNNING) {
return false;
}
auto jobStart = jobDesc.get_start_time();
auto duration = std::difftime(nebula::time::WallClock::fastNowInSec(), jobStart);
return duration > FLAGS_job_expired_secs;
}
bool JobManager::isRunningJob(const JobDescription& jobDesc) {
auto status = jobDesc.getStatus();
if (status == cpp2::JobStatus::QUEUE || status == cpp2::JobStatus::RUNNING) {
return true;
}
return false;
}
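// Synchronously removes the given expired job/task keys, blocking on a baton
// until the asynchronous multi-remove completes.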
nebula::cpp2::ErrorCode JobManager::removeExpiredJobs(
std::vector<std::string>&& expiredJobsAndTasks) {
nebula::cpp2::ErrorCode ret;
folly::Baton<true, std::atomic> baton;
kvStore_->asyncMultiRemove(kDefaultSpaceId,
kDefaultPartId,
std::move(expiredJobsAndTasks),
[&](nebula::cpp2::ErrorCode code) {
if (code != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "kvstore asyncRemoveRange failed: "
<< apache::thrift::util::enumNameSafe(code);
}
ret = code;
baton.post();
});
baton.wait();
return ret;
}
bool JobManager::checkJobExist(const cpp2::AdminCmd& cmd,
const std::vector<std::string>& paras,
JobID& iJob) {
JobDescription jobDesc(0, cmd, paras);
auto it = inFlightJobs_.begin();
while (it != inFlightJobs_.end()) {
if (it->second == jobDesc) {
iJob = it->first;
return true;
}
++it;
}
return false;
}
ErrorOr<nebula::cpp2::ErrorCode, std::pair<cpp2::JobDesc, std::vector<cpp2::TaskDesc>>>
JobManager::showJob(JobID iJob, const std::string& spaceName) {
auto jobKey = JobDescription::makeJobKey(iJob);
std::unique_ptr<kvstore::KVIterator> iter;
auto rc = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, jobKey, &iter);
if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) {
return rc;
}
if (!iter->valid()) {
return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND;
}
std::pair<cpp2::JobDesc, std::vector<cpp2::TaskDesc>> ret;
for (; iter->valid(); iter->next()) {
auto jKey = iter->key();
if (JobDescription::isJobKey(jKey)) {
auto optJobRet = JobDescription::makeJobDescription(jKey, iter->val());
if (!nebula::ok(optJobRet)) {
return nebula::error(optJobRet);
}
auto optJob = nebula::value(optJobRet);
if (optJob.getParas().back() != spaceName) {
LOG(WARNING) << "Show job " << iJob << " not in current space " << spaceName;
return nebula::cpp2::ErrorCode::E_JOB_NOT_IN_SPACE;
}
ret.first = optJob.toJobDesc();
} else {
TaskDescription td(jKey, iter->val());
ret.second.emplace_back(td.toTaskDesc());
}
}
if (ret.first.get_cmd() == meta::cpp2::AdminCmd::DATA_BALANCE ||
ret.first.get_cmd() == meta::cpp2::AdminCmd::ZONE_BALANCE) {
auto res = BalancePlan::show(iJob, kvStore_, adminClient_);
if (ok(res)) {
std::vector<cpp2::BalanceTask> thriftTasks = value(res);
auto& vec = ret.first.paras_ref<>().value();
size_t index = vec.size();
for (const auto& t : thriftTasks) {
std::string resVal;
apache::thrift::CompactSerializer::serialize(t, &resVal);
auto& val = ret.first.paras_ref<>().value();
val.emplace_back(resVal);
}
vec.emplace_back(std::to_string(index));
}
}
return ret;
}
nebula::cpp2::ErrorCode JobManager::stopJob(JobID iJob, const std::string& spaceName) {
LOG(INFO) << "try to stop job " << iJob;
auto optJobDescRet = JobDescription::loadJobDescription(iJob, kvStore_);
if (!nebula::ok(optJobDescRet)) {
auto retCode = nebula::error(optJobDescRet);
LOG(WARNING) << "LoadJobDesc failed, jobId " << iJob
<< " error: " << apache::thrift::util::enumNameSafe(retCode);
return retCode;
}
auto optJobDesc = nebula::value(optJobDescRet);
if (optJobDesc.getParas().back() != spaceName) {
LOG(WARNING) << "Stop job " << iJob << " not in space " << spaceName;
return nebula::cpp2::ErrorCode::E_JOB_NOT_IN_SPACE;
}
return jobFinished(iJob, cpp2::JobStatus::STOPPED);
}
/*
 * Return: the number of recovered jobs.
 */
ErrorOr<nebula::cpp2::ErrorCode, uint32_t> JobManager::recoverJob(
const std::string& spaceName, AdminClient* client, const std::vector<int32_t>& jobIds) {
int32_t recoveredJobNum = 0;
std::vector<std::pair<std::string, std::string>> kvs;
adminClient_ = client;
if (jobIds.empty()) {
std::unique_ptr<kvstore::KVIterator> iter;
auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter);
if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "Can't find jobs, error: " << apache::thrift::util::enumNameSafe(retCode);
return retCode;
}
for (; iter->valid(); iter->next()) {
if (!JobDescription::isJobKey(iter->key())) {
continue;
}
kvs.emplace_back(std::make_pair(iter->key(), iter->val()));
}
} else {
std::vector<std::string> keys;
keys.reserve(jobIds.size());
for (int jobId : jobIds) {
keys.emplace_back(JobDescription::makeJobKey(jobId));
}
std::vector<std::string> values;
auto retCode = kvStore_->multiGet(kDefaultSpaceId, kDefaultPartId, keys, &values);
if (retCode.first != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "Can't find jobs, error: " << apache::thrift::util::enumNameSafe(retCode.first);
return retCode.first;
}
for (size_t i = 0; i < keys.size(); i++) {
kvs.emplace_back(std::make_pair(keys[i], values[i]));
}
}
for (const std::pair<std::string, std::string>& p : kvs) {
auto optJobRet = JobDescription::makeJobDescription(p.first, p.second);
if (nebula::ok(optJobRet)) {
auto optJob = nebula::value(optJobRet);
if (optJob.getParas().back() != spaceName) {
continue;
}
if (optJob.getStatus() == cpp2::JobStatus::QUEUE ||
(jobIds.size() && (optJob.getStatus() == cpp2::JobStatus::FAILED ||
optJob.getStatus() == cpp2::JobStatus::STOPPED))) {
// Check if the job exists
JobID jId = 0;
auto jobExist = checkJobExist(optJob.getCmd(), optJob.getParas(), jId);
if (!jobExist) {
auto jobId = optJob.getJobId();
enqueue(JbOp::RECOVER, jobId, optJob.getCmd());
inFlightJobs_.emplace(jobId, optJob);
++recoveredJobNum;
}
}
}
}
return recoveredJobNum;
}
nebula::cpp2::ErrorCode JobManager::save(const std::string& k, const std::string& v) {
std::vector<kvstore::KV> data{std::make_pair(k, v)};
folly::Baton<true, std::atomic> baton;
auto rc = nebula::cpp2::ErrorCode::SUCCEEDED;
kvStore_->asyncMultiPut(
kDefaultSpaceId, kDefaultPartId, std::move(data), [&](nebula::cpp2::ErrorCode code) {
rc = code;
baton.post();
});
baton.wait();
return rc;
}
ErrorOr<nebula::cpp2::ErrorCode, GraphSpaceID> JobManager::getSpaceId(const std::string& name) {
auto indexKey = MetaKeyUtils::indexSpaceKey(name);
std::string val;
auto retCode = kvStore_->get(kDefaultSpaceId, kDefaultPartId, indexKey, &val);
if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) {
if (retCode == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) {
retCode = nebula::cpp2::ErrorCode::E_SPACE_NOT_FOUND;
}
LOG(ERROR) << "KVStore error: " << apache::thrift::util::enumNameSafe(retCode);
return retCode;
}
return *reinterpret_cast<const GraphSpaceID*>(val.c_str());
}
ErrorOr<nebula::cpp2::ErrorCode, bool> JobManager::checkIndexJobRunning() {
std::unique_ptr<kvstore::KVIterator> iter;
auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter);
if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) {
LOG(ERROR) << "Fetch Jobs Failed, error: " << apache::thrift::util::enumNameSafe(retCode);
return retCode;
}
for (; iter->valid(); iter->next()) {
auto jobKey = iter->key();
if (JobDescription::isJobKey(jobKey)) {
auto optJobRet = JobDescription::makeJobDescription(jobKey, iter->val());
if (!nebula::ok(optJobRet)) {
continue;
}
auto jobDesc = nebula::value(optJobRet);
if (!isRunningJob(jobDesc)) {
continue;
}
auto cmd = jobDesc.getCmd();
if (cmd == cpp2::AdminCmd::REBUILD_TAG_INDEX || cmd == cpp2::AdminCmd::REBUILD_EDGE_INDEX) {
return true;
}
}
}
return false;
}
} // namespace meta
} // namespace nebula
| 1 | 33,190 | Why we need to use recursive_mutex here? | vesoft-inc-nebula | cpp |
@@ -68,8 +68,7 @@ var _ = framework.CertManagerDescribe("ACME Certificate (HTTP01)", func() {
validations := f.Helper().ValidationSetForUnsupportedFeatureSet(unsupportedFeatures)
BeforeEach(func() {
- acmeIssuer := util.NewCertManagerACMEIssuer(issuerName, f.Config.Addons.ACMEServer.URL, testingACMEEmail, testingACMEPrivateKey)
- acmeIssuer.Spec.ACME.Solvers = []cmacme.ACMEChallengeSolver{
+ solvers := []cmacme.ACMEChallengeSolver{
{
HTTP01: &cmacme.ACMEChallengeSolverHTTP01{
Ingress: &cmacme.ACMEChallengeSolverHTTP01Ingress{ | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificate
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
cmacme "github.com/jetstack/cert-manager/pkg/apis/acme/v1"
v1 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
cmutil "github.com/jetstack/cert-manager/pkg/util"
"github.com/jetstack/cert-manager/test/e2e/framework"
"github.com/jetstack/cert-manager/test/e2e/framework/helper/featureset"
"github.com/jetstack/cert-manager/test/e2e/framework/log"
. "github.com/jetstack/cert-manager/test/e2e/framework/matcher"
frameworkutil "github.com/jetstack/cert-manager/test/e2e/framework/util"
"github.com/jetstack/cert-manager/test/e2e/util"
"github.com/jetstack/cert-manager/test/unit/gen"
)
const testingACMEEmail = "[email protected]"
const testingACMEPrivateKey = "test-acme-private-key"
const foreverTestTimeout = time.Second * 60
var _ = framework.CertManagerDescribe("ACME Certificate (HTTP01)", func() {
f := framework.NewDefaultFramework("create-acme-certificate-http01")
h := f.Helper()
var acmeIngressDomain string
issuerName := "test-acme-issuer"
certificateName := "test-acme-certificate"
certificateSecretName := "test-acme-certificate"
// fixedIngressName is the name of an ingress resource that is configured
  // with a challenge solver.
// To utilise this solver, add the 'testing.cert-manager.io/fixed-ingress: "true"' label.
fixedIngressName := "testingress"
// ACME Issuer does not return a ca.crt. See:
// https://github.com/jetstack/cert-manager/issues/1571
unsupportedFeatures := featureset.NewFeatureSet(featureset.SaveCAToSecret)
validations := f.Helper().ValidationSetForUnsupportedFeatureSet(unsupportedFeatures)
BeforeEach(func() {
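    // Two HTTP01 solvers are configured below: a default class-based solver and a
    // label-selected solver that reuses the pre-created fixed ingress resource.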
acmeIssuer := util.NewCertManagerACMEIssuer(issuerName, f.Config.Addons.ACMEServer.URL, testingACMEEmail, testingACMEPrivateKey)
acmeIssuer.Spec.ACME.Solvers = []cmacme.ACMEChallengeSolver{
{
HTTP01: &cmacme.ACMEChallengeSolverHTTP01{
Ingress: &cmacme.ACMEChallengeSolverHTTP01Ingress{
Class: &f.Config.Addons.IngressController.IngressClass,
},
},
},
{
Selector: &cmacme.CertificateDNSNameSelector{
MatchLabels: map[string]string{
"testing.cert-manager.io/fixed-ingress": "true",
},
},
HTTP01: &cmacme.ACMEChallengeSolverHTTP01{
Ingress: &cmacme.ACMEChallengeSolverHTTP01Ingress{
Name: fixedIngressName,
},
},
},
}
By("Creating an Issuer")
_, err := f.CertManagerClientSet.CertmanagerV1().Issuers(f.Namespace.Name).Create(context.TODO(), acmeIssuer, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for Issuer to become Ready")
err = util.WaitForIssuerCondition(f.CertManagerClientSet.CertmanagerV1().Issuers(f.Namespace.Name),
issuerName,
v1.IssuerCondition{
Type: v1.IssuerConditionReady,
Status: cmmeta.ConditionTrue,
})
Expect(err).NotTo(HaveOccurred())
By("Verifying the ACME account URI is set")
err = util.WaitForIssuerStatusFunc(f.CertManagerClientSet.CertmanagerV1().Issuers(f.Namespace.Name),
issuerName,
func(i *v1.Issuer) (bool, error) {
if i.GetStatus().ACMEStatus().URI == "" {
return false, nil
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
By("Verifying ACME account private key exists")
secret, err := f.KubeClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), testingACMEPrivateKey, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
if len(secret.Data) != 1 {
Fail("Expected 1 key in ACME account private key secret, but there was %d", len(secret.Data))
}
})
JustBeforeEach(func() {
acmeIngressDomain = frameworkutil.RandomSubdomain(f.Config.Addons.IngressController.Domain)
})
AfterEach(func() {
By("Cleaning up")
f.CertManagerClientSet.CertmanagerV1().Issuers(f.Namespace.Name).Delete(context.TODO(), issuerName, metav1.DeleteOptions{})
f.KubeClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), testingACMEPrivateKey, metav1.DeleteOptions{})
})
It("should obtain a signed certificate with a single CN from the ACME server", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateDNSNames(acmeIngressDomain),
)
cert.Namespace = f.Namespace.Name
_, err := certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
It("should obtain a signed ecdsa certificate with a single CN from the ACME server", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{
Name: issuerName,
}),
gen.SetCertificateDNSNames(acmeIngressDomain),
gen.SetCertificateKeyAlgorithm(v1.ECDSAKeyAlgorithm),
)
cert.Namespace = f.Namespace.Name
_, err := certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
It("should obtain a signed certificate for a long domain using http01 validation", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
// the maximum length of a single segment of the domain being requested
const maxLengthOfDomainSegment = 63
By("Creating a Certificate")
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateDNSNames(acmeIngressDomain, fmt.Sprintf("%s.%s", cmutil.RandStringRunes(maxLengthOfDomainSegment), acmeIngressDomain)),
)
cert.Namespace = f.Namespace.Name
_, err := certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
It("should obtain a signed certificate with a CN and single subdomain as dns name from the ACME server", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateDNSNames(fmt.Sprintf("%s.%s", cmutil.RandStringRunes(5), acmeIngressDomain)),
)
cert.Namespace = f.Namespace.Name
_, err := certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Verifying the Certificate is valid")
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
It("should allow updating an existing certificate with a new dns name", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateDNSNames(fmt.Sprintf("%s.%s", cmutil.RandStringRunes(5), acmeIngressDomain)),
)
cert.Namespace = f.Namespace.Name
_, err := certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Verifying the Certificate is valid")
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
By("Getting the latest version of the Certificate")
cert, err = certClient.Get(context.TODO(), certificateName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Adding an additional dnsName to the Certificate")
newDNSName := fmt.Sprintf("%s.%s", cmutil.RandStringRunes(5), acmeIngressDomain)
cert.Spec.DNSNames = append(cert.Spec.DNSNames, newDNSName)
By("Updating the Certificate in the apiserver")
cert, err = certClient.Update(context.TODO(), cert, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be not ready")
_, err = h.WaitForCertificateNotReady(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to become ready & valid")
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
It("should allow updating the dns name of a failing certificate that had an incorrect dns name", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
By("Creating a failing Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateDNSNames("google.com"),
)
cert.Namespace = f.Namespace.Name
_, err := certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Making sure the Order failed with a 400 since google.com is invalid")
order := &cmacme.Order{}
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (done bool, err error) {
orders, err := listOwnedOrders(f.CertManagerClientSet, cert)
Expect(err).NotTo(HaveOccurred())
if len(orders) == 0 || len(orders) > 1 {
log.Logf("Waiting as one Order should exist, but we found %d", len(orders))
return false, nil
}
order = orders[0]
expected := `400 urn:ietf:params:acme:error:rejectedIdentifier`
if !strings.Contains(order.Status.Reason, expected) {
log.Logf("Waiting for Order's reason, current: %s, should contain: %s", order.Status.Reason, expected)
return false, nil
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be not ready")
_, err = h.WaitForCertificateNotReady(f.Namespace.Name, certificateName, 30*time.Second)
Expect(err).NotTo(HaveOccurred())
By("Getting the latest version of the Certificate")
cert, err = certClient.Get(context.TODO(), certificateName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Replacing dnsNames with a valid dns name")
cert.Spec.DNSNames = []string{fmt.Sprintf("%s.%s", cmutil.RandStringRunes(5), acmeIngressDomain)}
_, err = certClient.Update(context.TODO(), cert, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to have the Ready=True condition")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Sanity checking the issued Certificate")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
By("Checking that the secret contains this dns name")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, func(cert *v1.Certificate, secret *corev1.Secret) error {
dnsnames, err := findDNSNames(secret)
if err != nil {
return err
}
Expect(cert.Spec.DNSNames).To(ContainElements(dnsnames))
return nil
})
Expect(err).NotTo(HaveOccurred())
})
It("should fail to obtain a certificate for an invalid ACME dns name", func() {
// create test fixture
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateDNSNames("google.com"),
)
cert.Namespace = f.Namespace.Name
cert, err := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name).Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
notReadyCondition := v1.CertificateCondition{
Type: v1.CertificateConditionReady,
Status: cmmeta.ConditionFalse,
}
Eventually(cert, "30s", "1s").Should(HaveCondition(f, notReadyCondition))
Consistently(cert, "1m", "10s").Should(HaveCondition(f, notReadyCondition))
})
It("should obtain a signed certificate with a single CN from the ACME server when putting an annotation on an ingress resource", func() {
ingClient := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace.Name)
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
By("Creating an Ingress with the issuer name annotation set")
_, err := ingClient.Create(context.TODO(), util.NewIngress(certificateSecretName, certificateSecretName, map[string]string{
"cert-manager.io/issuer": issuerName,
}, acmeIngressDomain), metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for Certificate to exist")
err = util.WaitForCertificateToExist(certClient, certificateSecretName, foreverTestTimeout)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
It("should obtain a signed certificate with a single CN from the ACME server when redirected", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
// force-ssl-redirect should make every request turn into a redirect,
// but I haven't been able to make this happen. Create a TLS cert via
// the self-sign issuer to make it have a "proper" TLS cert
_, err := f.CertManagerClientSet.CertmanagerV1().Issuers(f.Namespace.Name).Create(context.TODO(), util.NewCertManagerSelfSignedIssuer("selfsign"), metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for (self-sign) Issuer to become Ready")
err = util.WaitForIssuerCondition(f.CertManagerClientSet.CertmanagerV1().Issuers(f.Namespace.Name),
issuerName,
v1.IssuerCondition{
Type: v1.IssuerConditionReady,
Status: cmmeta.ConditionTrue,
})
Expect(err).NotTo(HaveOccurred())
const dummycert = "dummy-tls"
const secretname = "dummy-tls-secret"
selfcert := util.NewCertManagerBasicCertificate("dummy-tls", secretname, "selfsign", v1.IssuerKind, nil, nil, acmeIngressDomain)
_, err = certClient.Create(context.TODO(), selfcert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, dummycert, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, dummycert, validations...)
Expect(err).NotTo(HaveOccurred())
// create an ingress that points at nothing, but has the TLS redirect annotation set
// using the TLS secret that we just got from the self-sign
ingress := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace.Name)
_, err = ingress.Create(context.TODO(), &networkingv1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: fixedIngressName,
Annotations: map[string]string{
"nginx.ingress.kubernetes.io/force-ssl-redirect": "true",
"kubernetes.io/ingress.class": "nginx",
},
},
Spec: networkingv1beta1.IngressSpec{
TLS: []networkingv1beta1.IngressTLS{
{
Hosts: []string{acmeIngressDomain},
SecretName: secretname,
},
},
Rules: []networkingv1beta1.IngressRule{
{
Host: acmeIngressDomain,
IngressRuleValue: networkingv1beta1.IngressRuleValue{
HTTP: &networkingv1beta1.HTTPIngressRuleValue{
Paths: []networkingv1beta1.HTTPIngressPath{
{
Path: "/",
Backend: networkingv1beta1.IngressBackend{
ServiceName: "doesnotexist",
ServicePort: intstr.FromInt(443),
},
},
},
},
},
},
},
},
}, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Creating a Certificate")
// This is a special cert for the test suite, where we specify an ingress rather than a
// class
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateDNSNames(acmeIngressDomain),
)
cert.Namespace = f.Namespace.Name
cert.Labels = map[string]string{
"testing.cert-manager.io/fixed-ingress": "true",
}
_, err = certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
It("should automatically recreate challenge pod and still obtain a certificate if it is manually deleted", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateDNSNames(acmeIngressDomain),
)
cert.Namespace = f.Namespace.Name
_, err := certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("killing the solver pod")
podClient := f.KubeClientSet.CoreV1().Pods(f.Namespace.Name)
var pod corev1.Pod
err = wait.PollImmediate(1*time.Second, time.Minute,
func() (bool, error) {
log.Logf("Waiting for solver pod to exist")
podlist, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
for _, p := range podlist.Items {
log.Logf("solver pod %s", p.Name)
// TODO(dmo): make this cleaner instead of just going by name
if strings.Contains(p.Name, "http-solver") {
pod = p
return true, nil
}
}
return false, nil
},
)
Expect(err).NotTo(HaveOccurred())
err = podClient.Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
// The pod should get remade and the certificate should be made valid.
// Killing the pod could potentially make the validation invalid if pebble
// were to ask us for the challenge after the pod was killed, but because
// we kill it so early, we should always be in the self-check phase
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
It("should obtain a signed certificate with a single IP Address from the ACME server", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateIPs(f.Config.Addons.ACMEServer.IngressIP),
)
cert.Namespace = f.Namespace.Name
_, err := certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
It("should obtain a signed certificate with an IP and DNS names from the ACME server", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateDNSNames(fmt.Sprintf("%s.%s", cmutil.RandStringRunes(2), acmeIngressDomain)),
gen.SetCertificateIPs(f.Config.Addons.ACMEServer.IngressIP),
)
cert.Namespace = f.Namespace.Name
_, err := certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
It("should allow updating an existing certificate with a new dns name", func() {
certClient := f.CertManagerClientSet.CertmanagerV1().Certificates(f.Namespace.Name)
By("Creating a Certificate")
cert := gen.Certificate(certificateName,
gen.SetCertificateSecretName(certificateSecretName),
gen.SetCertificateIssuer(cmmeta.ObjectReference{Name: issuerName}),
gen.SetCertificateDNSNames(fmt.Sprintf("%s.%s", cmutil.RandStringRunes(5), acmeIngressDomain)),
)
cert.Namespace = f.Namespace.Name
_, err := certClient.Create(context.TODO(), cert, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
By("Getting the latest version of the Certificate")
cert, err = certClient.Get(context.TODO(), certificateName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Adding an additional dnsName to the Certificate")
newDNSName := fmt.Sprintf("%s.%s", cmutil.RandStringRunes(5), acmeIngressDomain)
cert.Spec.DNSNames = append(cert.Spec.DNSNames, newDNSName)
By("Updating the Certificate in the apiserver")
cert, err = certClient.Update(context.TODO(), cert, metav1.UpdateOptions{})
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be not ready")
_, err = h.WaitForCertificateNotReady(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the Certificate to be issued...")
err = f.Helper().WaitCertificateIssued(f.Namespace.Name, certificateName, time.Minute*5)
Expect(err).NotTo(HaveOccurred())
By("Validating the issued Certificate...")
err = f.Helper().ValidateCertificate(f.Namespace.Name, certificateName, validations...)
Expect(err).NotTo(HaveOccurred())
})
})
// findDNSNames decodes and returns the dns names (SANs) contained in a
// certificate secret.
func findDNSNames(s *corev1.Secret) ([]string, error) {
if s.Data == nil {
return nil, fmt.Errorf("secret contains no data")
}
pkData := s.Data[corev1.TLSPrivateKeyKey]
certData := s.Data[corev1.TLSCertKey]
if len(pkData) == 0 || len(certData) == 0 {
return nil, fmt.Errorf("missing data in CA secret")
}
cert, err := tls.X509KeyPair(certData, pkData)
if err != nil {
return nil, fmt.Errorf("failed to parse data in CA secret: %w", err)
}
x509Cert, err := x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("internal error parsing x509 certificate: %w", err)
}
return x509Cert.DNSNames, nil
}
| 1 | 26,104 | am I correct these changes are related to: > I have removed a bunch of legacy functions for issuer generation ... if so, I can see why that's a valuable change but this is already a pretty huge PR and these changes to use `gen` here feel quite distant from the goal of this specific PR; they should maybe be in their own PR, to make everything easier to review? or have I missed how these changes are linked to this PR? | jetstack-cert-manager | go |
@@ -19,8 +19,10 @@ package org.openqa.selenium.grid.graphql;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
+
import org.openqa.selenium.grid.data.DistributorStatus;
import org.openqa.selenium.grid.distributor.Distributor;
+import org.openqa.selenium.grid.sessionmap.SessionMap;
import org.openqa.selenium.internal.Require;
import java.net.URI; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.graphql;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import org.openqa.selenium.grid.data.DistributorStatus;
import org.openqa.selenium.grid.distributor.Distributor;
import org.openqa.selenium.internal.Require;
import java.net.URI;
import java.util.List;
import java.util.function.Supplier;
public class Grid {
private final URI uri;
private final Supplier<DistributorStatus> distributorStatus;
public Grid(Distributor distributor, URI uri) {
Require.nonNull("Distributor", distributor);
this.uri = Require.nonNull("Grid's public URI", uri);
this.distributorStatus = Suppliers.memoize(distributor::getStatus);
}
public URI getUri() {
return uri;
}
public List<Node> getNodes() {
return distributorStatus.get().getNodes().stream()
.map(summary -> new Node(summary.getNodeId(),
summary.getUri(),
summary.isUp(),
summary.getMaxSessionCount(),
summary.getStereotypes()))
.collect(ImmutableList.toImmutableList());
}
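  // A node's capacity is the smaller of its configured max session count and the
  // number of slots advertised through its stereotypes.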
public int getTotalSlots() {
return distributorStatus.get().getNodes().stream()
.map(summary -> {
int slotCount = summary.getStereotypes().values().stream().mapToInt(i -> i).sum();
return Math.min(summary.getMaxSessionCount(), slotCount);
})
.mapToInt(i -> i)
.sum();
}
public int getUsedSlots() {
return distributorStatus.get().getNodes().stream()
.map(summary -> summary.getUsedStereotypes().values().stream().mapToInt(i -> i).sum())
.mapToInt(i -> i)
.sum();
}
}
| 1 | 17,778 | I think that this is an unused import | SeleniumHQ-selenium | java |
@@ -328,7 +328,7 @@ public class DdiExportUtil {
private static void writeVersionStatement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException{
xmlw.writeStartElement("verStmt");
- writeAttribute(xmlw,"source","DVN");
+ writeAttribute(xmlw,"source","producer");
xmlw.writeStartElement("version");
writeAttribute(xmlw,"date", datasetVersionDTO.getReleaseTime().substring(0, 10));
writeAttribute(xmlw,"type", datasetVersionDTO.getVersionState().toString()); | 1 | package edu.harvard.iq.dataverse.export.ddi;
import com.google.gson.Gson;
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.DataTable;
import edu.harvard.iq.dataverse.DatasetFieldConstant;
import edu.harvard.iq.dataverse.DatasetVersion;
import edu.harvard.iq.dataverse.FileMetadata;
import edu.harvard.iq.dataverse.GlobalId;
import edu.harvard.iq.dataverse.api.dto.DatasetDTO;
import edu.harvard.iq.dataverse.api.dto.DatasetVersionDTO;
import edu.harvard.iq.dataverse.api.dto.FieldDTO;
import edu.harvard.iq.dataverse.api.dto.FileDTO;
import edu.harvard.iq.dataverse.api.dto.MetadataBlockDTO;
import edu.harvard.iq.dataverse.datavariable.VariableMetadata;
import edu.harvard.iq.dataverse.datavariable.DataVariable;
import edu.harvard.iq.dataverse.datavariable.VariableServiceBean;
import edu.harvard.iq.dataverse.datavariable.VariableRange;
import edu.harvard.iq.dataverse.datavariable.SummaryStatistic;
import edu.harvard.iq.dataverse.datavariable.VariableCategory;
import edu.harvard.iq.dataverse.datavariable.VarGroup;
import edu.harvard.iq.dataverse.datavariable.CategoryMetadata;
import static edu.harvard.iq.dataverse.export.DDIExportServiceBean.LEVEL_FILE;
import static edu.harvard.iq.dataverse.export.DDIExportServiceBean.NOTE_SUBJECT_TAG;
import static edu.harvard.iq.dataverse.export.DDIExportServiceBean.NOTE_SUBJECT_UNF;
import static edu.harvard.iq.dataverse.export.DDIExportServiceBean.NOTE_TYPE_TAG;
import static edu.harvard.iq.dataverse.export.DDIExportServiceBean.NOTE_TYPE_UNF;
import edu.harvard.iq.dataverse.export.DDIExporter;
import static edu.harvard.iq.dataverse.util.SystemConfig.FQDN;
import static edu.harvard.iq.dataverse.util.SystemConfig.SITE_URL;
import edu.harvard.iq.dataverse.util.json.JsonUtil;
import edu.harvard.iq.dataverse.util.xml.XmlPrinter;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.ejb.EJB;
import javax.json.JsonObject;
import javax.xml.stream.XMLOutputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamWriter;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.FactoryConfigurationError;
import javax.xml.parsers.ParserConfigurationException;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
import org.w3c.dom.Document;
import org.w3c.dom.DOMException;
// For write operation
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.TransformerConfigurationException;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamSource;
import javax.xml.transform.stream.StreamResult;
import java.io.File;
import java.io.InputStream;
import java.io.InputStreamReader;
public class DdiExportUtil {
private static final Logger logger = Logger.getLogger(DdiExportUtil.class.getCanonicalName());
public static final String NOTE_TYPE_TERMS_OF_USE = "DVN:TOU";
public static final String NOTE_TYPE_TERMS_OF_ACCESS = "DVN:TOA";
public static final String NOTE_TYPE_DATA_ACCESS_PLACE = "DVN:DAP";
public static final String LEVEL_DV = "dv";
@EJB
VariableServiceBean variableService;
public static final String NOTE_TYPE_CONTENTTYPE = "DATAVERSE:CONTENTTYPE";
public static final String NOTE_SUBJECT_CONTENTTYPE = "Content/MIME Type";
public static String datasetDtoAsJson2ddi(String datasetDtoAsJson) {
logger.fine(JsonUtil.prettyPrint(datasetDtoAsJson));
Gson gson = new Gson();
DatasetDTO datasetDto = gson.fromJson(datasetDtoAsJson, DatasetDTO.class);
try {
return dto2ddi(datasetDto);
} catch (XMLStreamException ex) {
Logger.getLogger(DdiExportUtil.class.getName()).log(Level.SEVERE, null, ex);
return null;
}
}
// "short" ddi, without the "<fileDscr>" and "<dataDscr>/<var>" sections:
public static void datasetJson2ddi(JsonObject datasetDtoAsJson, OutputStream outputStream) throws XMLStreamException {
logger.fine(JsonUtil.prettyPrint(datasetDtoAsJson.toString()));
Gson gson = new Gson();
DatasetDTO datasetDto = gson.fromJson(datasetDtoAsJson.toString(), DatasetDTO.class);
dtoddi(datasetDto, outputStream);
}
private static String dto2ddi(DatasetDTO datasetDto) throws XMLStreamException {
OutputStream outputStream = new ByteArrayOutputStream();
dtoddi(datasetDto, outputStream);
String xml = outputStream.toString();
return XmlPrinter.prettyPrintXml(xml);
}
private static void dtoddi(DatasetDTO datasetDto, OutputStream outputStream) throws XMLStreamException {
XMLStreamWriter xmlw = XMLOutputFactory.newInstance().createXMLStreamWriter(outputStream);
xmlw.writeStartElement("codeBook");
xmlw.writeDefaultNamespace("ddi:codebook:2_5");
xmlw.writeAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance");
xmlw.writeAttribute("xsi:schemaLocation", DDIExporter.DEFAULT_XML_NAMESPACE + " " + DDIExporter.DEFAULT_XML_SCHEMALOCATION);
writeAttribute(xmlw, "version", DDIExporter.DEFAULT_XML_VERSION);
createStdyDscr(xmlw, datasetDto);
createOtherMats(xmlw, datasetDto.getDatasetVersion().getFiles());
xmlw.writeEndElement(); // codeBook
xmlw.flush();
}
// "full" ddi, with the the "<fileDscr>" and "<dataDscr>/<var>" sections:
public static void datasetJson2ddi(JsonObject datasetDtoAsJson, DatasetVersion version, OutputStream outputStream) throws XMLStreamException {
logger.fine(JsonUtil.prettyPrint(datasetDtoAsJson.toString()));
Gson gson = new Gson();
DatasetDTO datasetDto = gson.fromJson(datasetDtoAsJson.toString(), DatasetDTO.class);
XMLStreamWriter xmlw = XMLOutputFactory.newInstance().createXMLStreamWriter(outputStream);
xmlw.writeStartElement("codeBook");
xmlw.writeDefaultNamespace("ddi:codebook:2_5");
xmlw.writeAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance");
xmlw.writeAttribute("xsi:schemaLocation", DDIExporter.DEFAULT_XML_NAMESPACE + " " + DDIExporter.DEFAULT_XML_SCHEMALOCATION);
writeAttribute(xmlw, "version", DDIExporter.DEFAULT_XML_VERSION);
createStdyDscr(xmlw, datasetDto);
createFileDscr(xmlw, version);
createDataDscr(xmlw, version);
createOtherMatsFromFileMetadatas(xmlw, version.getFileMetadatas());
xmlw.writeEndElement(); // codeBook
xmlw.flush();
}
/**
* @todo This is just a stub, copied from DDIExportServiceBean. It should
* produce valid DDI based on
* http://guides.dataverse.org/en/latest/developers/tools.html#msv but it is
* incomplete and will be worked on as part of
* https://github.com/IQSS/dataverse/issues/2579 . We'll want to reference
* the DVN 3.x code for creating a complete DDI.
*
* @todo Rename this from "study" to "dataset".
*/
private static void createStdyDscr(XMLStreamWriter xmlw, DatasetDTO datasetDto) throws XMLStreamException {
DatasetVersionDTO version = datasetDto.getDatasetVersion();
String persistentProtocol = datasetDto.getProtocol();
String persistentAgency = persistentProtocol;
// The "persistentAgency" tag is used for the "agency" attribute of the
// <IDNo> ddi section; back in the DVN3 days we used "handle" and "DOI"
// for the 2 supported protocols, respectively. For the sake of backward
// compatibility, we should probably stick with these labels: (-- L.A. 4.5)
if ("hdl".equals(persistentAgency)) {
persistentAgency = "handle";
} else if ("doi".equals(persistentAgency)) {
persistentAgency = "DOI";
}
String persistentAuthority = datasetDto.getAuthority();
String persistentId = datasetDto.getIdentifier();
//docDesc Block
writeDocDescElement (xmlw, datasetDto);
//stdyDesc Block
xmlw.writeStartElement("stdyDscr");
xmlw.writeStartElement("citation");
xmlw.writeStartElement("titlStmt");
writeFullElement(xmlw, "titl", dto2Primitive(version, DatasetFieldConstant.title));
writeFullElement(xmlw, "subTitl", dto2Primitive(version, DatasetFieldConstant.subTitle));
writeFullElement(xmlw, "altTitl", dto2Primitive(version, DatasetFieldConstant.alternativeTitle));
xmlw.writeStartElement("IDNo");
writeAttribute(xmlw, "agency", persistentAgency);
xmlw.writeCharacters(persistentProtocol + ":" + persistentAuthority + "/" + persistentId);
xmlw.writeEndElement(); // IDNo
writeOtherIdElement(xmlw, version);
xmlw.writeEndElement(); // titlStmt
writeAuthorsElement(xmlw, version);
writeProducersElement(xmlw, version);
xmlw.writeStartElement("distStmt");
if (datasetDto.getPublisher() != null && !datasetDto.getPublisher().equals("")) {
xmlw.writeStartElement("distrbtr");
writeAttribute(xmlw, "source", "archive");
xmlw.writeCharacters(datasetDto.getPublisher());
xmlw.writeEndElement(); //distrbtr
}
writeDistributorsElement(xmlw, version);
writeContactsElement(xmlw, version);
writeFullElement(xmlw, "distDate", dto2Primitive(version, DatasetFieldConstant.distributionDate));
writeFullElement(xmlw, "depositr", dto2Primitive(version, DatasetFieldConstant.depositor));
writeFullElement(xmlw, "depDate", dto2Primitive(version, DatasetFieldConstant.dateOfDeposit));
xmlw.writeEndElement(); // diststmt
writeSeriesElement(xmlw, version);
xmlw.writeEndElement(); // citation
//End Citation Block
//Start Study Info Block
// Study Info
xmlw.writeStartElement("stdyInfo");
writeSubjectElement(xmlw, version); //Subject and Keywords
writeAbstractElement(xmlw, version); // Description
writeSummaryDescriptionElement(xmlw, version);
writeFullElement(xmlw, "notes", dto2Primitive(version, DatasetFieldConstant.notesText));
////////
xmlw.writeEndElement(); // stdyInfo
writeMethodElement(xmlw, version);
writeDataAccess(xmlw , version);
writeOtherStudyMaterial(xmlw , version);
writeFullElement(xmlw, "notes", dto2Primitive(version, DatasetFieldConstant.datasetLevelErrorNotes));
xmlw.writeEndElement(); // stdyDscr
}
private static void writeOtherStudyMaterial(XMLStreamWriter xmlw , DatasetVersionDTO version) throws XMLStreamException {
xmlw.writeStartElement("othrStdyMat");
writeFullElementList(xmlw, "relMat", dto2PrimitiveList(version, DatasetFieldConstant.relatedMaterial));
writeFullElementList(xmlw, "relStdy", dto2PrimitiveList(version, DatasetFieldConstant.relatedDatasets));
writeRelPublElement(xmlw, version);
writeFullElementList(xmlw, "othRefs", dto2PrimitiveList(version, DatasetFieldConstant.otherReferences));
xmlw.writeEndElement(); //othrStdyMat
}
private static void writeDataAccess(XMLStreamWriter xmlw , DatasetVersionDTO version) throws XMLStreamException {
xmlw.writeStartElement("dataAccs");
if (version.getTermsOfUse() != null && !version.getTermsOfUse().trim().equals("")) {
xmlw.writeStartElement("notes");
writeAttribute(xmlw, "type", NOTE_TYPE_TERMS_OF_USE);
writeAttribute(xmlw, "level", LEVEL_DV);
xmlw.writeCharacters(version.getTermsOfUse());
xmlw.writeEndElement(); //notes
}
if (version.getTermsOfAccess() != null && !version.getTermsOfAccess().trim().equals("")) {
xmlw.writeStartElement("notes");
writeAttribute(xmlw, "type", NOTE_TYPE_TERMS_OF_ACCESS);
writeAttribute(xmlw, "level", LEVEL_DV);
xmlw.writeCharacters(version.getTermsOfAccess());
xmlw.writeEndElement(); //notes
}
xmlw.writeStartElement("setAvail");
writeFullElement(xmlw, "accsPlac", version.getDataAccessPlace());
writeFullElement(xmlw, "origArch", version.getOriginalArchive());
writeFullElement(xmlw, "avlStatus", version.getAvailabilityStatus());
writeFullElement(xmlw, "collSize", version.getSizeOfCollection());
writeFullElement(xmlw, "complete", version.getStudyCompletion());
xmlw.writeEndElement(); //setAvail
xmlw.writeStartElement("useStmt");
writeFullElement(xmlw, "confDec", version.getConfidentialityDeclaration());
writeFullElement(xmlw, "specPerm", version.getSpecialPermissions());
writeFullElement(xmlw, "restrctn", version.getRestrictions());
writeFullElement(xmlw, "contact", version.getContactForAccess());
writeFullElement(xmlw, "citReq", version.getCitationRequirements());
writeFullElement(xmlw, "deposReq", version.getDepositorRequirements());
writeFullElement(xmlw, "conditions", version.getConditions());
writeFullElement(xmlw, "disclaimer", version.getDisclaimer());
xmlw.writeEndElement(); //useStmt
xmlw.writeEndElement(); //dataAccs
}
private static void writeDocDescElement (XMLStreamWriter xmlw, DatasetDTO datasetDto) throws XMLStreamException {
DatasetVersionDTO version = datasetDto.getDatasetVersion();
String persistentProtocol = datasetDto.getProtocol();
String persistentAgency = persistentProtocol;
// The "persistentAgency" tag is used for the "agency" attribute of the
// <IDNo> ddi section; back in the DVN3 days we used "handle" and "DOI"
// for the 2 supported protocols, respectively. For the sake of backward
// compatibility, we should probably stick with these labels: (-- L.A. 4.5)
if ("hdl".equals(persistentAgency)) {
persistentAgency = "handle";
} else if ("doi".equals(persistentAgency)) {
persistentAgency = "DOI";
}
String persistentAuthority = datasetDto.getAuthority();
String persistentId = datasetDto.getIdentifier();
xmlw.writeStartElement("docDscr");
xmlw.writeStartElement("citation");
xmlw.writeStartElement("titlStmt");
writeFullElement(xmlw, "titl", dto2Primitive(version, DatasetFieldConstant.title));
xmlw.writeStartElement("IDNo");
writeAttribute(xmlw, "agency", persistentAgency);
xmlw.writeCharacters(persistentProtocol + ":" + persistentAuthority + "/" + persistentId);
xmlw.writeEndElement(); // IDNo
xmlw.writeEndElement(); // titlStmt
xmlw.writeStartElement("distStmt");
if (datasetDto.getPublisher() != null && !datasetDto.getPublisher().equals("")) {
xmlw.writeStartElement("distrbtr");
writeAttribute(xmlw, "source", "archive");
xmlw.writeCharacters(datasetDto.getPublisher());
xmlw.writeEndElement(); // distrbtr
}
writeFullElement(xmlw, "distDate", datasetDto.getPublicationDate());
xmlw.writeEndElement(); // diststmt
writeVersionStatement(xmlw, version);
xmlw.writeStartElement("biblCit");
xmlw.writeCharacters(version.getCitation());
xmlw.writeEndElement(); // biblCit
xmlw.writeEndElement(); // citation
xmlw.writeEndElement(); // docDscr
}
private static void writeVersionStatement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException{
xmlw.writeStartElement("verStmt");
writeAttribute(xmlw,"source","DVN");
xmlw.writeStartElement("version");
writeAttribute(xmlw,"date", datasetVersionDTO.getReleaseTime().substring(0, 10));
writeAttribute(xmlw,"type", datasetVersionDTO.getVersionState().toString());
xmlw.writeCharacters(datasetVersionDTO.getVersionNumber().toString());
xmlw.writeEndElement(); // version
xmlw.writeEndElement(); // verStmt
}
private static void writeSummaryDescriptionElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
xmlw.writeStartElement("sumDscr");
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
Integer per = 0;
Integer coll = 0;
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.timePeriodCovered.equals(fieldDTO.getTypeName())) {
String dateValStart = "";
String dateValEnd = "";
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
per++;
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.timePeriodCoveredStart.equals(next.getTypeName())) {
dateValStart = next.getSinglePrimitive();
}
if (DatasetFieldConstant.timePeriodCoveredEnd.equals(next.getTypeName())) {
dateValEnd = next.getSinglePrimitive();
}
}
if (!dateValStart.isEmpty()) {
writeDateElement(xmlw, "timePrd", "P"+ per.toString(), "start", dateValStart );
}
if (!dateValEnd.isEmpty()) {
writeDateElement(xmlw, "timePrd", "P"+ per.toString(), "end", dateValEnd );
}
}
}
if (DatasetFieldConstant.dateOfCollection.equals(fieldDTO.getTypeName())) {
String dateValStart = "";
String dateValEnd = "";
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
coll++;
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.dateOfCollectionStart.equals(next.getTypeName())) {
dateValStart = next.getSinglePrimitive();
}
if (DatasetFieldConstant.dateOfCollectionEnd.equals(next.getTypeName())) {
dateValEnd = next.getSinglePrimitive();
}
}
if (!dateValStart.isEmpty()) {
writeDateElement(xmlw, "collDate", "P"+ coll.toString(), "start", dateValStart );
}
if (!dateValEnd.isEmpty()) {
writeDateElement(xmlw, "collDate", "P"+ coll.toString(), "end", dateValEnd );
}
}
}
if (DatasetFieldConstant.kindOfData.equals(fieldDTO.getTypeName())) {
writeMultipleElement(xmlw, "dataKind", fieldDTO);
}
}
}
if("geospatial".equals(key)){
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.geographicCoverage.equals(fieldDTO.getTypeName())) {
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
HashMap<String, String> geoMap = new HashMap<>();
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.country.equals(next.getTypeName())) {
geoMap.put("country", next.getSinglePrimitive());
}
if (DatasetFieldConstant.city.equals(next.getTypeName())) {
geoMap.put("city", next.getSinglePrimitive());
}
if (DatasetFieldConstant.state.equals(next.getTypeName())) {
geoMap.put("state", next.getSinglePrimitive());
}
if (DatasetFieldConstant.otherGeographicCoverage.equals(next.getTypeName())) {
geoMap.put("otherGeographicCoverage", next.getSinglePrimitive());
}
}
if (geoMap.get("country") != null) {
writeFullElement(xmlw, "nation", geoMap.get("country"));
}
if (geoMap.get("city") != null) {
writeFullElement(xmlw, "geogCover", geoMap.get("city"));
}
if (geoMap.get("state") != null) {
writeFullElement(xmlw, "geogCover", geoMap.get("state"));
}
if (geoMap.get("otherGeographicCoverage") != null) {
writeFullElement(xmlw, "geogCover", geoMap.get("otherGeographicCoverage"));
}
}
}
if (DatasetFieldConstant.geographicBoundingBox.equals(fieldDTO.getTypeName())) {
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
xmlw.writeStartElement("geoBndBox");
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.westLongitude.equals(next.getTypeName())) {
writeFullElement(xmlw, "westBL", next.getSinglePrimitive());
}
if (DatasetFieldConstant.eastLongitude.equals(next.getTypeName())) {
writeFullElement(xmlw, "eastBL", next.getSinglePrimitive());
}
if (DatasetFieldConstant.northLatitude.equals(next.getTypeName())) {
writeFullElement(xmlw, "northBL", next.getSinglePrimitive());
}
if (DatasetFieldConstant.southLatitude.equals(next.getTypeName())) {
writeFullElement(xmlw, "southBL", next.getSinglePrimitive());
}
}
xmlw.writeEndElement();
}
}
}
writeFullElementList(xmlw, "geogUnit", dto2PrimitiveList(datasetVersionDTO, DatasetFieldConstant.geographicUnit));
}
if("socialscience".equals(key)){
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.universe.equals(fieldDTO.getTypeName())) {
writeMultipleElement(xmlw, "universe", fieldDTO);
}
if (DatasetFieldConstant.unitOfAnalysis.equals(fieldDTO.getTypeName())) {
writeMultipleElement(xmlw, "anlyUnit", fieldDTO);
}
}
}
}
xmlw.writeEndElement(); //sumDscr
}
private static void writeMultipleElement(XMLStreamWriter xmlw, String element, FieldDTO fieldDTO) throws XMLStreamException {
for (String value : fieldDTO.getMultiplePrimitive()) {
writeFullElement(xmlw, element, value);
}
}
private static void writeDateElement(XMLStreamWriter xmlw, String element, String cycle, String event, String dateIn) throws XMLStreamException {
xmlw.writeStartElement(element);
writeAttribute(xmlw, "cycle", cycle);
writeAttribute(xmlw, "event", event);
writeAttribute(xmlw, "date", dateIn);
xmlw.writeCharacters(dateIn);
xmlw.writeEndElement();
}
private static void writeMethodElement(XMLStreamWriter xmlw , DatasetVersionDTO version) throws XMLStreamException{
xmlw.writeStartElement("method");
xmlw.writeStartElement("dataColl");
writeFullElement(xmlw, "timeMeth", dto2Primitive(version, DatasetFieldConstant.timeMethod));
writeFullElement(xmlw, "dataCollector", dto2Primitive(version, DatasetFieldConstant.dataCollector));
writeFullElement(xmlw, "collectorTraining", dto2Primitive(version, DatasetFieldConstant.collectorTraining));
writeFullElement(xmlw, "frequenc", dto2Primitive(version, DatasetFieldConstant.frequencyOfDataCollection));
writeFullElement(xmlw, "sampProc", dto2Primitive(version, DatasetFieldConstant.samplingProcedure));
writeTargetSampleElement(xmlw, version);
writeFullElement(xmlw, "deviat", dto2Primitive(version, DatasetFieldConstant.deviationsFromSampleDesign));
xmlw.writeStartElement("sources");
writeFullElementList(xmlw, "dataSrc", dto2PrimitiveList(version, DatasetFieldConstant.dataSources));
writeFullElement(xmlw, "srcOrig", dto2Primitive(version, DatasetFieldConstant.originOfSources));
writeFullElement(xmlw, "srcChar", dto2Primitive(version, DatasetFieldConstant.characteristicOfSources));
writeFullElement(xmlw, "srcDocu", dto2Primitive(version, DatasetFieldConstant.accessToSources));
xmlw.writeEndElement(); //sources
writeFullElement(xmlw, "collMode", dto2Primitive(version, DatasetFieldConstant.collectionMode));
writeFullElement(xmlw, "resInstru", dto2Primitive(version, DatasetFieldConstant.researchInstrument));
writeFullElement(xmlw, "collSitu", dto2Primitive(version, DatasetFieldConstant.dataCollectionSituation));
writeFullElement(xmlw, "actMin", dto2Primitive(version, DatasetFieldConstant.actionsToMinimizeLoss));
writeFullElement(xmlw, "conOps", dto2Primitive(version, DatasetFieldConstant.controlOperations));
writeFullElement(xmlw, "weight", dto2Primitive(version, DatasetFieldConstant.weighting));
writeFullElement(xmlw, "cleanOps", dto2Primitive(version, DatasetFieldConstant.cleaningOperations));
xmlw.writeEndElement(); //dataColl
xmlw.writeStartElement("anlyInfo");
//writeFullElement(xmlw, "anylInfo", dto2Primitive(version, DatasetFieldConstant.datasetLevelErrorNotes));
writeFullElement(xmlw, "respRate", dto2Primitive(version, DatasetFieldConstant.responseRate));
writeFullElement(xmlw, "EstSmpErr", dto2Primitive(version, DatasetFieldConstant.samplingErrorEstimates));
writeFullElement(xmlw, "dataAppr", dto2Primitive(version, DatasetFieldConstant.otherDataAppraisal));
xmlw.writeEndElement(); //anlyInfo
writeNotesElement(xmlw, version);
xmlw.writeEndElement();//method
}
private static void writeSubjectElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException{
//Key Words and Topic Classification
xmlw.writeStartElement("subject");
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.subject.equals(fieldDTO.getTypeName())){
for ( String subject : fieldDTO.getMultipleVocab()){
xmlw.writeStartElement("keyword");
xmlw.writeCharacters(subject);
xmlw.writeEndElement(); //Keyword
}
}
if (DatasetFieldConstant.keyword.equals(fieldDTO.getTypeName())) {
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
String keywordValue = "";
String keywordVocab = "";
String keywordURI = "";
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.keywordValue.equals(next.getTypeName())) {
keywordValue = next.getSinglePrimitive();
}
if (DatasetFieldConstant.keywordVocab.equals(next.getTypeName())) {
keywordVocab = next.getSinglePrimitive();
}
if (DatasetFieldConstant.keywordVocabURI.equals(next.getTypeName())) {
keywordURI = next.getSinglePrimitive();
}
}
if (!keywordValue.isEmpty()){
xmlw.writeStartElement("keyword");
if(!keywordVocab.isEmpty()){
writeAttribute(xmlw,"vocab",keywordVocab);
}
if(!keywordURI.isEmpty()){
writeAttribute(xmlw,"vocabURI",keywordURI);
}
xmlw.writeCharacters(keywordValue);
xmlw.writeEndElement(); //Keyword
}
}
}
if (DatasetFieldConstant.topicClassification.equals(fieldDTO.getTypeName())) {
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
String topicClassificationValue = "";
String topicClassificationVocab = "";
String topicClassificationURI = "";
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.topicClassValue.equals(next.getTypeName())) {
topicClassificationValue = next.getSinglePrimitive();
}
if (DatasetFieldConstant.topicClassVocab.equals(next.getTypeName())) {
topicClassificationVocab = next.getSinglePrimitive();
}
if (DatasetFieldConstant.topicClassVocabURI.equals(next.getTypeName())) {
topicClassificationURI = next.getSinglePrimitive();
}
}
if (!topicClassificationValue.isEmpty()){
xmlw.writeStartElement("topcClas");
if(!topicClassificationVocab.isEmpty()){
writeAttribute(xmlw,"vocab",topicClassificationVocab);
}
if(!topicClassificationURI.isEmpty()){
writeAttribute(xmlw,"vocabURI",topicClassificationURI);
}
xmlw.writeCharacters(topicClassificationValue);
xmlw.writeEndElement(); //topcClas
}
}
}
}
}
}
xmlw.writeEndElement(); // subject
}
private static void writeAuthorsElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
xmlw.writeStartElement("rspStmt");
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.author.equals(fieldDTO.getTypeName())) {
String authorName = "";
String authorAffiliation = "";
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.authorName.equals(next.getTypeName())) {
authorName = next.getSinglePrimitive();
}
if (DatasetFieldConstant.authorAffiliation.equals(next.getTypeName())) {
authorAffiliation = next.getSinglePrimitive();
}
}
if (!authorName.isEmpty()){
xmlw.writeStartElement("AuthEnty");
if(!authorAffiliation.isEmpty()){
writeAttribute(xmlw,"affiliation",authorAffiliation);
}
xmlw.writeCharacters(authorName);
xmlw.writeEndElement(); //AuthEnty
}
}
} else if (DatasetFieldConstant.contributor.equals(fieldDTO.getTypeName())) {
String contributorName = "";
String contributorType = "";
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.contributorName.equals(next.getTypeName())) {
contributorName = next.getSinglePrimitive();
}
if (DatasetFieldConstant.contributorType.equals(next.getTypeName())) {
contributorType = next.getSinglePrimitive();
}
}
if (!contributorName.isEmpty()){
xmlw.writeStartElement("othId");
if(!contributorType.isEmpty()){
writeAttribute(xmlw,"role", contributorType);
}
xmlw.writeCharacters(contributorName);
xmlw.writeEndElement(); //othId
}
}
}
}
xmlw.writeEndElement(); //rspStmt
}
}
}
private static void writeContactsElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.datasetContact.equals(fieldDTO.getTypeName())) {
String datasetContactName = "";
String datasetContactAffiliation = "";
String datasetContactEmail = "";
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.datasetContactName.equals(next.getTypeName())) {
datasetContactName = next.getSinglePrimitive();
}
if (DatasetFieldConstant.datasetContactAffiliation.equals(next.getTypeName())) {
datasetContactAffiliation = next.getSinglePrimitive();
}
if (DatasetFieldConstant.datasetContactEmail.equals(next.getTypeName())) {
datasetContactEmail = next.getSinglePrimitive();
}
}
// TODO: Since datasetContactEmail is a required field but datasetContactName is not, consider not requiring datasetContactName to be non-empty, so that datasetContactEmail can still be written out.
if (!datasetContactName.isEmpty()){
xmlw.writeStartElement("contact");
if(!datasetContactAffiliation.isEmpty()){
writeAttribute(xmlw,"affiliation",datasetContactAffiliation);
}
if(!datasetContactEmail.isEmpty()){
writeAttribute(xmlw,"email",datasetContactEmail);
}
xmlw.writeCharacters(datasetContactName);
xmlw.writeEndElement(); //contact
}
}
}
}
}
}
}
private static void writeProducersElement(XMLStreamWriter xmlw, DatasetVersionDTO version) throws XMLStreamException {
xmlw.writeStartElement("prodStmt");
for (Map.Entry<String, MetadataBlockDTO> entry : version.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.producer.equals(fieldDTO.getTypeName())) {
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
String producerName = "";
String producerAffiliation = "";
String producerAbbreviation = "";
String producerLogo = "";
String producerURL = "";
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.producerName.equals(next.getTypeName())) {
producerName = next.getSinglePrimitive();
}
if (DatasetFieldConstant.producerAffiliation.equals(next.getTypeName())) {
producerAffiliation = next.getSinglePrimitive();
}
if (DatasetFieldConstant.producerAbbreviation.equals(next.getTypeName())) {
producerAbbreviation = next.getSinglePrimitive();
}
if (DatasetFieldConstant.producerLogo.equals(next.getTypeName())) {
producerLogo = next.getSinglePrimitive();
}
if (DatasetFieldConstant.producerURL.equals(next.getTypeName())) {
producerURL = next.getSinglePrimitive();
}
}
if (!producerName.isEmpty()) {
xmlw.writeStartElement("producer");
if (!producerAffiliation.isEmpty()) {
writeAttribute(xmlw, "affiliation", producerAffiliation);
}
if (!producerAbbreviation.isEmpty()) {
writeAttribute(xmlw, "abbr", producerAbbreviation);
}
if (!producerLogo.isEmpty()) {
writeAttribute(xmlw, "role", producerLogo);
}
if (!producerURL.isEmpty()) {
writeAttribute(xmlw, "URI", producerURL);
}
xmlw.writeCharacters(producerName);
xmlw.writeEndElement(); //producer
}
}
}
}
}
}
writeFullElement(xmlw, "prodDate", dto2Primitive(version, DatasetFieldConstant.productionDate));
writeFullElement(xmlw, "prodPlac", dto2Primitive(version, DatasetFieldConstant.productionPlace));
writeSoftwareElement(xmlw, version);
writeGrantElement(xmlw, version);
xmlw.writeEndElement(); //prodStmt
}
private static void writeDistributorsElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.distributor.equals(fieldDTO.getTypeName())) {
//xmlw.writeStartElement("distrbtr");
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
String distributorName = "";
String distributorAffiliation = "";
String distributorAbbreviation = "";
String distributorURL = "";
String distributorLogoURL = "";
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.distributorName.equals(next.getTypeName())) {
distributorName = next.getSinglePrimitive();
}
if (DatasetFieldConstant.distributorAffiliation.equals(next.getTypeName())) {
distributorAffiliation = next.getSinglePrimitive();
}
if (DatasetFieldConstant.distributorAbbreviation.equals(next.getTypeName())) {
distributorAbbreviation = next.getSinglePrimitive();
}
if (DatasetFieldConstant.distributorURL.equals(next.getTypeName())) {
distributorURL = next.getSinglePrimitive();
}
if (DatasetFieldConstant.distributorLogo.equals(next.getTypeName())) {
distributorLogoURL = next.getSinglePrimitive();
}
}
if (!distributorName.isEmpty()) {
xmlw.writeStartElement("distrbtr");
if (!distributorAffiliation.isEmpty()) {
writeAttribute(xmlw, "affiliation", distributorAffiliation);
}
if (!distributorAbbreviation.isEmpty()) {
writeAttribute(xmlw, "abbr", distributorAbbreviation);
}
if (!distributorURL.isEmpty()) {
writeAttribute(xmlw, "URI", distributorURL);
}
if (!distributorLogoURL.isEmpty()) {
writeAttribute(xmlw, "role", distributorLogoURL);
}
xmlw.writeCharacters(distributorName);
xmlw.writeEndElement(); //distrbtr
}
}
//xmlw.writeEndElement(); //rspStmt
}
}
}
}
}
private static void writeRelPublElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.publication.equals(fieldDTO.getTypeName())) {
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
String pubString = "";
String citation = "";
String IDType = "";
String IDNo = "";
String url = "";
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.publicationCitation.equals(next.getTypeName())) {
citation = next.getSinglePrimitive();
}
if (DatasetFieldConstant.publicationIDType.equals(next.getTypeName())) {
IDType = next.getSinglePrimitive();
}
if (DatasetFieldConstant.publicationIDNumber.equals(next.getTypeName())) {
IDNo = next.getSinglePrimitive();
}
if (DatasetFieldConstant.publicationURL.equals(next.getTypeName())) {
url = next.getSinglePrimitive();
}
}
if (citation != null && !citation.trim().equals("")) {
xmlw.writeStartElement("relPubl");
xmlw.writeStartElement("citation");
if (IDNo != null && !IDNo.trim().equals("")) {
xmlw.writeStartElement("titlStmt");
xmlw.writeStartElement("IDNo");
if (IDType != null && !IDType.trim().equals("")) {
xmlw.writeAttribute("agency", IDType );
}
xmlw.writeCharacters(IDNo);
xmlw.writeEndElement(); //IDNo
xmlw.writeEndElement(); // titlStmt
}
writeFullElement(xmlw,"biblCit",citation);
xmlw.writeEndElement(); //citation
if (url != null && !url.trim().equals("") ) {
xmlw.writeStartElement("ExtLink");
xmlw.writeAttribute("URI", url);
xmlw.writeEndElement(); //ExtLink
}
xmlw.writeEndElement(); //relPubl
}
}
}
}
}
}
}
private static String appendCommaSeparatedValue(String inVal, String next) {
if (!next.isEmpty()) {
if (!inVal.isEmpty()) {
return inVal + ", " + next;
} else {
return next;
}
}
return inVal;
}
private static void writeAbstractElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.description.equals(fieldDTO.getTypeName())) {
String descriptionText = "";
String descriptionDate = "";
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.descriptionText.equals(next.getTypeName())) {
descriptionText = next.getSinglePrimitive();
}
if (DatasetFieldConstant.descriptionDate.equals(next.getTypeName())) {
descriptionDate = next.getSinglePrimitive();
}
}
if (!descriptionText.isEmpty()){
xmlw.writeStartElement("abstract");
if(!descriptionDate.isEmpty()){
writeAttribute(xmlw,"date",descriptionDate);
}
xmlw.writeCharacters(descriptionText);
xmlw.writeEndElement(); //abstract
}
}
}
}
}
}
}
private static void writeGrantElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.grantNumber.equals(fieldDTO.getTypeName())) {
String grantNumber = "";
String grantAgency = "";
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.grantNumberValue.equals(next.getTypeName())) {
grantNumber = next.getSinglePrimitive();
}
if (DatasetFieldConstant.grantNumberAgency.equals(next.getTypeName())) {
grantAgency = next.getSinglePrimitive();
}
}
if (!grantNumber.isEmpty()){
xmlw.writeStartElement("grantNo");
if(!grantAgency.isEmpty()){
writeAttribute(xmlw,"agency",grantAgency);
}
xmlw.writeCharacters(grantNumber);
xmlw.writeEndElement(); //grantno
}
}
}
}
}
}
}
private static void writeOtherIdElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.otherId.equals(fieldDTO.getTypeName())) {
String otherId = "";
String otherIdAgency = "";
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.otherIdValue.equals(next.getTypeName())) {
otherId = next.getSinglePrimitive();
}
if (DatasetFieldConstant.otherIdAgency.equals(next.getTypeName())) {
otherIdAgency = next.getSinglePrimitive();
}
}
if (!otherId.isEmpty()){
xmlw.writeStartElement("IDNo");
if(!otherIdAgency.isEmpty()){
writeAttribute(xmlw,"agency",otherIdAgency);
}
xmlw.writeCharacters(otherId);
xmlw.writeEndElement(); //IDNo
}
}
}
}
}
}
}
private static void writeSoftwareElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.software.equals(fieldDTO.getTypeName())) {
String softwareName = "";
String softwareVersion = "";
for (HashSet<FieldDTO> foo : fieldDTO.getMultipleCompound()) {
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.softwareName.equals(next.getTypeName())) {
softwareName = next.getSinglePrimitive();
}
if (DatasetFieldConstant.softwareVersion.equals(next.getTypeName())) {
softwareVersion = next.getSinglePrimitive();
}
}
if (!softwareName.isEmpty()){
xmlw.writeStartElement("software");
if(!softwareVersion.isEmpty()){
writeAttribute(xmlw,"version",softwareVersion);
}
xmlw.writeCharacters(softwareName);
xmlw.writeEndElement(); //software
}
}
}
}
}
}
}
private static void writeSeriesElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("citation".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.series.equals(fieldDTO.getTypeName())) {
xmlw.writeStartElement("serStmt");
String seriesName = "";
String seriesInformation = "";
Set<FieldDTO> foo = fieldDTO.getSingleCompound();
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.seriesName.equals(next.getTypeName())) {
seriesName = next.getSinglePrimitive();
}
if (DatasetFieldConstant.seriesInformation.equals(next.getTypeName())) {
seriesInformation = next.getSinglePrimitive();
}
}
if (!seriesName.isEmpty()){
xmlw.writeStartElement("serName");
xmlw.writeCharacters(seriesName);
xmlw.writeEndElement(); //serName
}
if (!seriesInformation.isEmpty()){
xmlw.writeStartElement("serInfo");
xmlw.writeCharacters(seriesInformation);
xmlw.writeEndElement(); //serInfo
}
xmlw.writeEndElement(); //serStmt
}
}
}
}
}
private static void writeTargetSampleElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("socialscience".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.targetSampleSize.equals(fieldDTO.getTypeName())) {
xmlw.writeStartElement("targetSampleSize");
String sizeFormula = "";
String actualSize = "";
Set<FieldDTO> foo = fieldDTO.getSingleCompound();
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.targetSampleSizeFormula.equals(next.getTypeName())) {
sizeFormula = next.getSinglePrimitive();
}
if (DatasetFieldConstant.targetSampleActualSize.equals(next.getTypeName())) {
actualSize = next.getSinglePrimitive();
}
}
if (!sizeFormula.isEmpty()) {
xmlw.writeStartElement("sampleSizeFormula");
xmlw.writeCharacters(sizeFormula);
xmlw.writeEndElement(); //sampleSizeFormula
}
if (!actualSize.isEmpty()) {
xmlw.writeStartElement("sampleSize");
xmlw.writeCharacters(actualSize);
xmlw.writeEndElement(); //sampleSize
}
xmlw.writeEndElement(); // targetSampleSize
}
}
}
}
}
private static void writeNotesElement(XMLStreamWriter xmlw, DatasetVersionDTO datasetVersionDTO) throws XMLStreamException {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
String key = entry.getKey();
MetadataBlockDTO value = entry.getValue();
if ("socialscience".equals(key)) {
for (FieldDTO fieldDTO : value.getFields()) {
if (DatasetFieldConstant.socialScienceNotes.equals(fieldDTO.getTypeName())) {
String notesText = "";
String notesType = "";
String notesSubject= "";
Set<FieldDTO> foo = fieldDTO.getSingleCompound();
for (Iterator<FieldDTO> iterator = foo.iterator(); iterator.hasNext();) {
FieldDTO next = iterator.next();
if (DatasetFieldConstant.socialScienceNotesText.equals(next.getTypeName())) {
notesText = next.getSinglePrimitive();
}
if (DatasetFieldConstant.socialScienceNotesType.equals(next.getTypeName())) {
notesType = next.getSinglePrimitive();
}
if (DatasetFieldConstant.socialScienceNotesSubject.equals(next.getTypeName())) {
notesSubject = next.getSinglePrimitive();
}
}
if (!notesText.isEmpty()) {
xmlw.writeStartElement("notes");
if(!notesType.isEmpty()){
writeAttribute(xmlw,"type",notesType);
}
if(!notesSubject.isEmpty()){
writeAttribute(xmlw,"subject",notesSubject);
}
xmlw.writeCharacters(notesText);
xmlw.writeEndElement();
}
}
}
}
}
}
// TODO:
// see if there's more information that we could encode in this otherMat.
// contentType? UNFs and such? (In the "short" DDI that is being used for
// harvesting, *all* files are encoded as otherMats, even tabular ones.)
private static void createOtherMats(XMLStreamWriter xmlw, List<FileDTO> fileDtos) throws XMLStreamException {
// The preferred URL for this dataverse, for cooking up the file access API links:
String dataverseUrl = getDataverseSiteUrl();
for (FileDTO fileDTo : fileDtos) {
// We'll continue using the scheme we've used before, in DVN2-3: non-tabular files are put into otherMat,
// tabular ones - in fileDscr sections. (fileDscr sections have special fields for numbers of variables
// and observations, etc.)
if (fileDTo.getDataFile().getDataTables() == null || fileDTo.getDataFile().getDataTables().isEmpty()) {
xmlw.writeStartElement("otherMat");
writeAttribute(xmlw, "ID", "f" + fileDTo.getDataFile().getId());
String pidURL = fileDTo.getDataFile().getPidURL();
if (pidURL != null && !pidURL.isEmpty()){
writeAttribute(xmlw, "URI", pidURL);
} else {
writeAttribute(xmlw, "URI", dataverseUrl + "/api/access/datafile/" + fileDTo.getDataFile().getId());
}
writeAttribute(xmlw, "level", "datafile");
xmlw.writeStartElement("labl");
xmlw.writeCharacters(fileDTo.getDataFile().getFilename());
xmlw.writeEndElement(); // labl
writeFileDescription(xmlw, fileDTo);
// there's no readily available field in the othermat section
// for the content type (aka mime type); so we'll store it in this
// specially formatted notes section:
String contentType = fileDTo.getDataFile().getContentType();
if (!StringUtilisEmpty(contentType)) {
xmlw.writeStartElement("notes");
writeAttribute(xmlw, "level", LEVEL_FILE);
writeAttribute(xmlw, "type", NOTE_TYPE_CONTENTTYPE);
writeAttribute(xmlw, "subject", NOTE_SUBJECT_CONTENTTYPE);
xmlw.writeCharacters(contentType);
xmlw.writeEndElement(); // notes
}
xmlw.writeEndElement(); // otherMat
}
}
}
// An alternative version of the createOtherMats method - this one is used
// when a "full" DDI is being cooked; just like the fileDscr and data/var sections methods,
// it operates on the list of FileMetadata entities, not on File DTOs. This is because
// DTOs do not support "tabular", variable-level metadata yet. And we need to be able to
// tell if this file is in fact tabular data - so that we know if it needs an
// otherMat, or a fileDscr section.
// -- L.A. 4.5
private static void createOtherMatsFromFileMetadatas(XMLStreamWriter xmlw, List<FileMetadata> fileMetadatas) throws XMLStreamException {
// The preferred URL for this dataverse, for cooking up the file access API links:
String dataverseUrl = getDataverseSiteUrl();
for (FileMetadata fileMetadata : fileMetadatas) {
// We'll continue using the scheme we've used before, in DVN2-3: non-tabular files are put into otherMat,
// tabular ones - in fileDscr sections. (fileDscr sections have special fields for numbers of variables
// and observations, etc.)
if (fileMetadata.getDataFile() != null && !fileMetadata.getDataFile().isTabularData()) {
xmlw.writeStartElement("otherMat");
writeAttribute(xmlw, "ID", "f" + fileMetadata.getDataFile().getId());
String dfIdentifier = fileMetadata.getDataFile().getIdentifier();
if (dfIdentifier != null && !dfIdentifier.isEmpty()){
GlobalId globalId = new GlobalId(fileMetadata.getDataFile());
writeAttribute(xmlw, "URI", globalId.toURL().toString());
} else {
writeAttribute(xmlw, "URI", dataverseUrl + "/api/access/datafile/" + fileMetadata.getDataFile().getId());
}
writeAttribute(xmlw, "level", "datafile");
xmlw.writeStartElement("labl");
xmlw.writeCharacters(fileMetadata.getLabel());
xmlw.writeEndElement(); // labl
String description = fileMetadata.getDescription();
if (description != null) {
xmlw.writeStartElement("txt");
xmlw.writeCharacters(description);
xmlw.writeEndElement(); // txt
}
// there's no readily available field in the othermat section
// for the content type (aka mime type); so we'll store it in this
// specially formatted notes section:
String contentType = fileMetadata.getDataFile().getContentType();
if (!StringUtilisEmpty(contentType)) {
xmlw.writeStartElement("notes");
writeAttribute(xmlw, "level", LEVEL_FILE);
writeAttribute(xmlw, "type", NOTE_TYPE_CONTENTTYPE);
writeAttribute(xmlw, "subject", NOTE_SUBJECT_CONTENTTYPE);
xmlw.writeCharacters(contentType);
xmlw.writeEndElement(); // notes
}
xmlw.writeEndElement(); // otherMat
}
}
}
private static void writeFileDescription(XMLStreamWriter xmlw, FileDTO fileDTo) throws XMLStreamException {
xmlw.writeStartElement("txt");
String description = fileDTo.getDataFile().getDescription();
if (description != null) {
xmlw.writeCharacters(description);
}
xmlw.writeEndElement(); // txt
}
private static String dto2Primitive(DatasetVersionDTO datasetVersionDTO, String datasetFieldTypeName) {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
MetadataBlockDTO value = entry.getValue();
for (FieldDTO fieldDTO : value.getFields()) {
if (datasetFieldTypeName.equals(fieldDTO.getTypeName())) {
return fieldDTO.getSinglePrimitive();
}
}
}
return null;
}
private static List<String> dto2PrimitiveList(DatasetVersionDTO datasetVersionDTO, String datasetFieldTypeName) {
for (Map.Entry<String, MetadataBlockDTO> entry : datasetVersionDTO.getMetadataBlocks().entrySet()) {
MetadataBlockDTO value = entry.getValue();
for (FieldDTO fieldDTO : value.getFields()) {
if (datasetFieldTypeName.equals(fieldDTO.getTypeName())) {
return fieldDTO.getMultiplePrimitive();
}
}
}
return null;
}
private static void writeFullElementList(XMLStreamWriter xmlw, String name, List<String> values) throws XMLStreamException {
// For the simplest (primitive, repeatable) fields: writes one element per value.
if (values != null && !values.isEmpty()) {
for (String value : values) {
xmlw.writeStartElement(name);
xmlw.writeCharacters(value);
xmlw.writeEndElement();
}
}
}
private static void writeFullElement (XMLStreamWriter xmlw, String name, String value) throws XMLStreamException {
// For the simplest elements: writes a single element with the given name and text content, skipping empty values.
if (!StringUtilisEmpty(value)) {
xmlw.writeStartElement(name);
xmlw.writeCharacters(value);
xmlw.writeEndElement();
}
}
private static void writeAttribute(XMLStreamWriter xmlw, String name, String value) throws XMLStreamException {
if (!StringUtilisEmpty(value)) {
xmlw.writeAttribute(name, value);
}
}
private static boolean StringUtilisEmpty(String str) {
if (str == null || str.trim().equals("")) {
return true;
}
return false;
}
private static void saveJsonToDisk(String datasetVersionAsJson) throws IOException {
Files.write(Paths.get("/tmp/out.json"), datasetVersionAsJson.getBytes());
}
/**
* The "official", designated URL of the site;
* can be defined as a complete URL; or derived from the
* "official" hostname. If none of these options is set,
* defaults to the InetAddress.getLocalHOst() and https;
*/
private static String getDataverseSiteUrl() {
String hostUrl = System.getProperty(SITE_URL);
if (hostUrl != null && !"".equals(hostUrl)) {
return hostUrl;
}
String hostName = System.getProperty(FQDN);
if (hostName == null) {
try {
hostName = InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException e) {
hostName = null;
}
}
if (hostName != null) {
return "https://" + hostName;
}
return "http://localhost:8080";
}
// Methods specific to the tabular data ("<dataDscr>") section.
// Note that these do NOT operate on DTO objects, but instead directly
// on Dataverse DataVariable, DataTable, etc. objects.
// This is because for this release (4.5) we are recycling the already available
// code, and this is what we got. (We already have DTO objects for DataTable,
// and DataVariable, etc., but the current version JsonPrinter.jsonAsDatasetDto()
// does not produce JSON for these objects - it stops at DataFile. Eventually
// we want all of our objects to be exportable as JSON, and then all the exports
// can go through the same DTO state... But we don't have time for it now;
// plus, the structure of file-level metadata is currently being re-designed,
// so we probably should not invest any time into it right now). -- L.A. 4.5
public static void createDataDscr(XMLStreamWriter xmlw, DatasetVersion datasetVersion) throws XMLStreamException {
if (datasetVersion.getFileMetadatas() == null || datasetVersion.getFileMetadatas().isEmpty()) {
return;
}
boolean tabularData = false;
// we're not writing the opening <dataDscr> tag until we find an actual
// tabular datafile.
for (FileMetadata fileMetadata : datasetVersion.getFileMetadatas()) {
DataFile dataFile = fileMetadata.getDataFile();
if (dataFile != null && dataFile.isTabularData()) {
if (!tabularData) {
xmlw.writeStartElement("dataDscr");
tabularData = true;
}
for (VarGroup varGrp : fileMetadata.getVarGroups()) {
createVarGroupDDI(xmlw, varGrp);
}
List<DataVariable> vars = dataFile.getDataTable().getDataVariables();
for (DataVariable var : vars) {
createVarDDI(xmlw, var, fileMetadata);
}
}
}
if (tabularData) {
xmlw.writeEndElement(); // dataDscr
}
}
private static void createVarGroupDDI(XMLStreamWriter xmlw, VarGroup varGrp) throws XMLStreamException {
xmlw.writeStartElement("varGrp");
writeAttribute(xmlw, "ID", "VG" + varGrp.getId().toString());
String vars = "";
Set<DataVariable> varsInGroup = varGrp.getVarsInGroup();
for (DataVariable var : varsInGroup) {
vars = vars + " v" + var.getId();
}
vars = vars.trim();
writeAttribute(xmlw, "var", vars );
if (!StringUtilisEmpty(varGrp.getLabel())) {
xmlw.writeStartElement("labl");
xmlw.writeCharacters(varGrp.getLabel());
xmlw.writeEndElement(); // group label (labl)
}
xmlw.writeEndElement(); //varGrp
}
private static void createVarDDI(XMLStreamWriter xmlw, DataVariable dv, FileMetadata fileMetadata) throws XMLStreamException {
xmlw.writeStartElement("var");
writeAttribute(xmlw, "ID", "v" + dv.getId().toString());
writeAttribute(xmlw, "name", dv.getName());
VariableMetadata vm = null;
for (VariableMetadata vmIter : dv.getVariableMetadatas()) {
FileMetadata fm = vmIter.getFileMetadata();
if (fm != null && fm.equals(fileMetadata) ){
vm = vmIter;
break;
}
}
if (dv.getNumberOfDecimalPoints() != null) {
writeAttribute(xmlw, "dcml", dv.getNumberOfDecimalPoints().toString());
}
if (dv.isOrderedCategorical()) {
writeAttribute(xmlw, "nature", "ordinal");
}
if (dv.getInterval() != null) {
String interval = dv.getIntervalLabel();
if (interval != null) {
writeAttribute(xmlw, "intrvl", interval);
}
}
if (vm != null) {
if (vm.isIsweightvar()) {
writeAttribute(xmlw, "wgt", "wgt");
}
if (vm.isWeighted() && vm.getWeightvariable() != null) {
writeAttribute(xmlw, "wgt-var", "v"+vm.getWeightvariable().getId().toString());
}
}
// location
xmlw.writeEmptyElement("location");
if (dv.getFileStartPosition() != null) {
writeAttribute(xmlw, "StartPos", dv.getFileStartPosition().toString());
}
if (dv.getFileEndPosition() != null) {
writeAttribute(xmlw, "EndPos", dv.getFileEndPosition().toString());
}
if (dv.getRecordSegmentNumber() != null) {
writeAttribute(xmlw, "RecSegNo", dv.getRecordSegmentNumber().toString());
}
writeAttribute(xmlw, "fileid", "f" + dv.getDataTable().getDataFile().getId().toString());
// labl
if ((vm == null || StringUtilisEmpty(vm.getLabel())) && !StringUtilisEmpty(dv.getLabel())) {
xmlw.writeStartElement("labl");
writeAttribute(xmlw, "level", "variable");
xmlw.writeCharacters(dv.getLabel());
xmlw.writeEndElement(); //labl
} else if (vm != null && !StringUtilisEmpty(vm.getLabel())) {
xmlw.writeStartElement("labl");
writeAttribute(xmlw, "level", "variable");
xmlw.writeCharacters(vm.getLabel());
xmlw.writeEndElement(); //labl
}
if (vm != null) {
if (!StringUtilisEmpty(vm.getLiteralquestion()) || !StringUtilisEmpty(vm.getInterviewinstruction()) || !StringUtilisEmpty(vm.getPostquestion())) {
xmlw.writeStartElement("qstn");
if (!StringUtilisEmpty(vm.getLiteralquestion())) {
xmlw.writeStartElement("qstnLit");
xmlw.writeCharacters(vm.getLiteralquestion());
xmlw.writeEndElement(); // qstnLit
}
if (!StringUtilisEmpty(vm.getInterviewinstruction())) {
xmlw.writeStartElement("ivuInstr");
xmlw.writeCharacters(vm.getInterviewinstruction());
xmlw.writeEndElement(); //ivuInstr
}
if (!StringUtilisEmpty(vm.getPostquestion())) {
xmlw.writeStartElement("postQTxt");
xmlw.writeCharacters(vm.getPostquestion());
xmlw.writeEndElement(); //postQTxt
}
xmlw.writeEndElement(); //qstn
}
}
// invalrng
boolean invalrngAdded = false;
for (VariableRange range : dv.getInvalidRanges()) {
//if (range.getBeginValueType() != null && range.getBeginValueType().getName().equals(DB_VAR_RANGE_TYPE_POINT)) {
if (range.getBeginValueType() != null && range.isBeginValueTypePoint()) {
if (range.getBeginValue() != null) {
invalrngAdded = checkParentElement(xmlw, "invalrng", invalrngAdded);
xmlw.writeEmptyElement("item");
writeAttribute(xmlw, "VALUE", range.getBeginValue());
}
} else {
invalrngAdded = checkParentElement(xmlw, "invalrng", invalrngAdded);
xmlw.writeEmptyElement("range");
if (range.getBeginValueType() != null && range.getBeginValue() != null) {
if (range.isBeginValueTypeMin()) {
writeAttribute(xmlw, "min", range.getBeginValue());
} else if (range.isBeginValueTypeMinExcl()) {
writeAttribute(xmlw, "minExclusive", range.getBeginValue());
}
}
if (range.getEndValueType() != null && range.getEndValue() != null) {
if (range.isEndValueTypeMax()) {
writeAttribute(xmlw, "max", range.getEndValue());
} else if (range.isEndValueTypeMaxExcl()) {
writeAttribute(xmlw, "maxExclusive", range.getEndValue());
}
}
}
}
if (invalrngAdded) {
xmlw.writeEndElement(); // invalrng
}
//universe
if (vm != null) {
if (!StringUtilisEmpty(vm.getUniverse())) {
xmlw.writeStartElement("universe");
xmlw.writeCharacters(vm.getUniverse());
xmlw.writeEndElement(); //universe
}
}
//sum stats
for (SummaryStatistic sumStat : dv.getSummaryStatistics()) {
xmlw.writeStartElement("sumStat");
if (sumStat.getTypeLabel() != null) {
writeAttribute(xmlw, "type", sumStat.getTypeLabel());
} else {
writeAttribute(xmlw, "type", "unknown");
}
xmlw.writeCharacters(sumStat.getValue());
xmlw.writeEndElement(); //sumStat
}
// categories
for (VariableCategory cat : dv.getCategories()) {
xmlw.writeStartElement("catgry");
if (cat.isMissing()) {
writeAttribute(xmlw, "missing", "Y");
}
// catValu
xmlw.writeStartElement("catValu");
xmlw.writeCharacters(cat.getValue());
xmlw.writeEndElement(); //catValu
// label
if (!StringUtilisEmpty(cat.getLabel())) {
xmlw.writeStartElement("labl");
writeAttribute(xmlw, "level", "category");
xmlw.writeCharacters(cat.getLabel());
xmlw.writeEndElement(); //labl
}
// catStat
if (cat.getFrequency() != null) {
xmlw.writeStartElement("catStat");
writeAttribute(xmlw, "type", "freq");
// if frequency is actually a long value, we want to write "100" instead of "100.0"
if (Math.floor(cat.getFrequency()) == cat.getFrequency()) {
xmlw.writeCharacters(Long.toString(cat.getFrequency().longValue()));
} else {
xmlw.writeCharacters(cat.getFrequency().toString());
}
xmlw.writeEndElement(); //catStat
}
//catStat weighted freq
if (vm != null && vm.isWeighted()) {
for (CategoryMetadata cm : vm.getCategoriesMetadata()) {
if (cm.getCategory().getValue().equals(cat.getValue())) {
xmlw.writeStartElement("catStat");
writeAttribute(xmlw, "wgtd", "wgtd");
writeAttribute(xmlw, "type", "freq");
xmlw.writeCharacters(cm.getWfreq().toString());
xmlw.writeEndElement(); //catStat
break;
}
}
}
xmlw.writeEndElement(); //catgry
}
// varFormat
xmlw.writeEmptyElement("varFormat");
if (dv.isTypeNumeric()) {
writeAttribute(xmlw, "type", "numeric");
} else if (dv.isTypeCharacter()) {
writeAttribute(xmlw, "type", "character");
} else {
throw new XMLStreamException("Illegal Variable Format Type!");
}
writeAttribute(xmlw, "formatname", dv.getFormat());
//experiment writeAttribute(xmlw, "schema", dv.getFormatSchema());
writeAttribute(xmlw, "category", dv.getFormatCategory());
// notes
if (dv.getUnf() != null && !"".equals(dv.getUnf())) {
xmlw.writeStartElement("notes");
writeAttribute(xmlw, "subject", "Universal Numeric Fingerprint");
writeAttribute(xmlw, "level", "variable");
writeAttribute(xmlw, "type", "Dataverse:UNF");
xmlw.writeCharacters(dv.getUnf());
xmlw.writeEndElement(); //notes
}
if (vm != null) {
if (!StringUtilisEmpty(vm.getNotes())) {
xmlw.writeStartElement("notes");
xmlw.writeCData(vm.getNotes());
xmlw.writeEndElement(); //notes CDATA
}
}
xmlw.writeEndElement(); //var
}
private static void createFileDscr(XMLStreamWriter xmlw, DatasetVersion datasetVersion) throws XMLStreamException {
String dataverseUrl = getDataverseSiteUrl();
for (FileMetadata fileMetadata : datasetVersion.getFileMetadatas()) {
DataFile dataFile = fileMetadata.getDataFile();
if (dataFile != null && dataFile.isTabularData()) {
DataTable dt = dataFile.getDataTable();
xmlw.writeStartElement("fileDscr");
writeAttribute(xmlw, "ID", "f" + dataFile.getId());
writeAttribute(xmlw, "URI", dataverseUrl + "/api/access/datafile/" + dataFile.getId());
xmlw.writeStartElement("fileTxt");
xmlw.writeStartElement("fileName");
xmlw.writeCharacters(fileMetadata.getLabel());
xmlw.writeEndElement(); // fileName
if (dt.getCaseQuantity() != null || dt.getVarQuantity() != null || dt.getRecordsPerCase() != null) {
xmlw.writeStartElement("dimensns");
if (dt.getCaseQuantity() != null) {
xmlw.writeStartElement("caseQnty");
xmlw.writeCharacters(dt.getCaseQuantity().toString());
xmlw.writeEndElement(); // caseQnty
}
if (dt.getVarQuantity() != null) {
xmlw.writeStartElement("varQnty");
xmlw.writeCharacters(dt.getVarQuantity().toString());
xmlw.writeEndElement(); // varQnty
}
if (dt.getRecordsPerCase() != null) {
xmlw.writeStartElement("recPrCas");
xmlw.writeCharacters(dt.getRecordsPerCase().toString());
xmlw.writeEndElement(); // recPrCas
}
xmlw.writeEndElement(); // dimensns
}
xmlw.writeStartElement("fileType");
xmlw.writeCharacters(dataFile.getContentType());
xmlw.writeEndElement(); // fileType
xmlw.writeEndElement(); // fileTxt
// various notes:
// this specially formatted note section is used to store the UNF
// (Universal Numeric Fingerprint) signature:
if (dt.getUnf() != null && !dt.getUnf().equals("")) {
xmlw.writeStartElement("notes");
writeAttribute(xmlw, "level", LEVEL_FILE);
writeAttribute(xmlw, "type", NOTE_TYPE_UNF);
writeAttribute(xmlw, "subject", NOTE_SUBJECT_UNF);
xmlw.writeCharacters(dt.getUnf());
xmlw.writeEndElement(); // notes
}
if (dataFile.getTags() != null) {
for (int i = 0; i < dataFile.getTags().size(); i++) {
xmlw.writeStartElement("notes");
writeAttribute(xmlw, "level", LEVEL_FILE);
writeAttribute(xmlw, "type", NOTE_TYPE_TAG);
writeAttribute(xmlw, "subject", NOTE_SUBJECT_TAG);
xmlw.writeCharacters(dataFile.getTags().get(i).getTypeLabel());
xmlw.writeEndElement(); // notes
}
}
// TODO: add the remaining fileDscr elements!
xmlw.writeEndElement(); // fileDscr
}
}
}
private static boolean checkParentElement(XMLStreamWriter xmlw, String elementName, boolean elementAdded) throws XMLStreamException {
if (!elementAdded) {
xmlw.writeStartElement(elementName);
}
return true;
}
public static void datasetHtmlDDI(InputStream datafile, OutputStream outputStream) throws XMLStreamException {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
try {
Document document;
InputStream styleSheetInput = DdiExportUtil.class.getClassLoader().getResourceAsStream("edu/harvard/iq/dataverse/codebook2-0.xsl");
DocumentBuilder builder = factory.newDocumentBuilder();
document = builder.parse(datafile);
// Use a Transformer for output
TransformerFactory tFactory = TransformerFactory.newInstance();
StreamSource stylesource = new StreamSource(styleSheetInput);
Transformer transformer = tFactory.newTransformer(stylesource);
DOMSource source = new DOMSource(document);
StreamResult result = new StreamResult(outputStream);
transformer.transform(source, result);
} catch (TransformerConfigurationException tce) {
// Error generated by the parser
logger.severe("Transformer Factory error" + " " + tce.getMessage());
} catch (TransformerException te) {
// Error generated by the parser
logger.severe("Transformation error" + " " + te.getMessage());
} catch (SAXException sxe) {
// Error generated by this application
// (or a parser-initialization error)
logger.severe("SAX error " + sxe.getMessage());
} catch (ParserConfigurationException pce) {
// Parser with specified options can't be built
logger.severe("Parser configuration error " + pce.getMessage());
} catch (IOException ioe) {
// I/O error
logger.info("I/O error " + ioe.getMessage());
}
}
}
| 1 | 42,521 | The string "DVN_3_0" appears in `if (!SOURCE_DVN_3_0.equals(xmlr.getAttributeValue(null, "source"))) {` in ImportDDIServiceBean. It's not an exact match so I don't *think* this will break dataset import from DDI but I thought I'd at least mention it. | IQSS-dataverse | java |
@@ -361,6 +361,15 @@ func (s *svc) CompareCommits(ctx context.Context, ref *RemoteRef, compareSHA str
}, nil
}
+func (s *svc) ListCommitsViaComparison(ctx context.Context, ref *RemoteRef, compareSHA string) ([]*githubv3.RepositoryCommit, error) {
+ comp, _, err := s.rest.Repositories.CompareCommits(ctx, ref.RepoOwner, ref.RepoName, compareSHA, ref.Ref)
+ if err != nil {
+ return nil, fmt.Errorf("could not get commits from comparison for %s and %s. %+v", ref.Ref, compareSHA, err)
+ }
+
+ return comp.Commits, nil
+}
+
type Commit struct {
Files []*githubv3.CommitFile
Message string | 1 | package github
// <!-- START clutchdoc -->
// description: GitHub client that combines the REST/GraphQL APIs and raw git capabilities into a single interface.
// <!-- END clutchdoc -->
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"time"
"github.com/go-git/go-billy/v5/memfs"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
gittransport "github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/go-git/go-git/v5/storage/memory"
"github.com/golang/protobuf/ptypes/any"
githubv3 "github.com/google/go-github/v37/github"
"github.com/shurcooL/githubv4"
"github.com/uber-go/tally"
"go.uber.org/zap"
"golang.org/x/oauth2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
githubv1 "github.com/lyft/clutch/backend/api/config/service/github/v1"
scgithubv1 "github.com/lyft/clutch/backend/api/sourcecontrol/github/v1"
sourcecontrolv1 "github.com/lyft/clutch/backend/api/sourcecontrol/v1"
"github.com/lyft/clutch/backend/service"
)
const Name = "clutch.service.github"
const CurrentUser = ""
type FileMap map[string]io.ReadCloser
func New(cfg *any.Any, logger *zap.Logger, scope tally.Scope) (service.Service, error) {
config := &githubv1.Config{}
if err := cfg.UnmarshalTo(config); err != nil {
return nil, err
}
return newService(config), nil
}
// Remote ref points to a git reference using a combination of the repository and the reference itself.
type RemoteRef struct {
// Organization or user that owns the repository.
RepoOwner string
// Name of the repository.
RepoName string
// SHA, branch name, or tag.
Ref string
}
// Repository contains information about a requested repository.
type Repository struct {
Name string
Owner string
DefaultBranch string
}
// File contains information about a requested file, including its content.
type File struct {
Path string
Contents io.ReadCloser
SHA string
LastModifiedTime time.Time
LastModifiedSHA string
}
// Client allows various interactions with remote repositories on GitHub.
type Client interface {
GetFile(ctx context.Context, ref *RemoteRef, path string) (*File, error)
CreateBranch(ctx context.Context, req *CreateBranchRequest) error
CreatePullRequest(ctx context.Context, ref *RemoteRef, base, title, body string) (*PullRequestInfo, error)
CreateRepository(ctx context.Context, req *sourcecontrolv1.CreateRepositoryRequest) (*sourcecontrolv1.CreateRepositoryResponse, error)
CreateIssueComment(ctx context.Context, ref *RemoteRef, number int, body string) error
CompareCommits(ctx context.Context, ref *RemoteRef, compareSHA string) (*scgithubv1.CommitComparison, error)
GetCommit(ctx context.Context, ref *RemoteRef) (*Commit, error)
GetRepository(ctx context.Context, ref *RemoteRef) (*Repository, error)
GetOrganization(ctx context.Context, organization string) (*githubv3.Organization, error)
ListOrganizations(ctx context.Context, user string) ([]*githubv3.Organization, error)
GetOrgMembership(ctx context.Context, user, org string) (*githubv3.Membership, error)
GetUser(ctx context.Context, username string) (*githubv3.User, error)
}
// This func can be used to create comments for PRs or Issues
func (s *svc) CreateIssueComment(ctx context.Context, ref *RemoteRef, number int, body string) error {
com := &githubv3.IssueComment{
Body: strPtr(body),
}
_, _, err := s.rest.Issues.CreateComment(ctx, ref.RepoOwner, ref.RepoName, number, com)
return err
}
type PullRequestInfo struct {
Number int
HTMLURL string
}
type svc struct {
graphQL v4client
rest v3client
rawAuth *gittransport.BasicAuth
}
func (s *svc) GetOrganization(ctx context.Context, organization string) (*githubv3.Organization, error) {
org, _, err := s.rest.Organizations.Get(ctx, organization)
if err != nil {
return nil, err
}
return org, nil
}
// ListOrganizations returns all organizations for a specified user.
// To list organizations for the currently authenticated user set user to "".
func (s *svc) ListOrganizations(ctx context.Context, user string) ([]*githubv3.Organization, error) {
organizations, _, err := s.rest.Organizations.List(ctx, user, &githubv3.ListOptions{})
if err != nil {
return nil, err
}
return organizations, nil
}
// GetOrgMembership returns a specified user's membership within a specified organization.
// To get the membership of the currently authenticated user set user to "".
func (s *svc) GetOrgMembership(ctx context.Context, user, org string) (*githubv3.Membership, error) {
membership, response, err := s.rest.Organizations.GetOrgMembership(ctx, user, org)
if err != nil {
// A user might be part of an org but not have permissions to get membership information if auth is behind SSO.
// In this case we return a default Membership.
if response.StatusCode == 403 {
return &githubv3.Membership{}, nil
}
return nil, err
}
return membership, nil
}
// GetUser returns information about the specified user.
// To get information about the currently authenticated user set username to "".
func (s *svc) GetUser(ctx context.Context, username string) (*githubv3.User, error) {
user, _, err := s.rest.Users.Get(ctx, username)
if err != nil {
return nil, err
}
return user, nil
}
func (s *svc) CreateRepository(ctx context.Context, req *sourcecontrolv1.CreateRepositoryRequest) (*sourcecontrolv1.CreateRepositoryResponse, error) {
// Validate that we received GitHub Options.
_, ok := req.Options.(*sourcecontrolv1.CreateRepositoryRequest_GithubOptions)
if !ok {
return nil, status.New(codes.InvalidArgument, "GitHub options were not provided to GitHub service").Err()
}
opts := req.GetGithubOptions()
currentUser, _, err := s.rest.Users.Get(ctx, "")
if err != nil {
return nil, err
}
var org string
if org = req.Owner; currentUser.GetLogin() == req.Owner {
// If the specified owner is the same as the current user the GitHub API expects an empty string.
org = ""
}
repo := &githubv3.Repository{
Name: strPtr(req.Name),
Description: strPtr(req.Description),
Private: boolPtr(opts.Parameters.Visibility.String() == sourcecontrolv1.Visibility_PRIVATE.String()),
AutoInit: boolPtr(opts.AutoInit),
}
newRepo, _, err := s.rest.Repositories.Create(ctx, org, repo)
if err != nil {
return nil, err
}
resp := &sourcecontrolv1.CreateRepositoryResponse{
Url: *newRepo.HTMLURL,
}
return resp, nil
}
func strPtr(s string) *string {
return &s
}
func boolPtr(b bool) *bool {
return &b
}
func (s *svc) CreatePullRequest(ctx context.Context, ref *RemoteRef, base, title, body string) (*PullRequestInfo, error) {
req := &githubv3.NewPullRequest{
Title: strPtr(title),
Head: strPtr(ref.Ref),
Base: strPtr(base),
Body: strPtr(body),
MaintainerCanModify: boolPtr(true),
}
pr, _, err := s.rest.PullRequests.Create(ctx, ref.RepoOwner, ref.RepoName, req)
if err != nil {
return nil, err
}
return &PullRequestInfo{
Number: pr.GetNumber(),
// There are many possible URLs to return, but the HTML one is most human friendly
HTMLURL: pr.GetHTMLURL(),
}, nil
}
type CreateBranchRequest struct {
// The base for the new branch.
Ref *RemoteRef
// The name of the new branch.
BranchName string
// Files and their content. Files will be clobbered with new content or created if they don't already exist.
Files FileMap
// The commit message for files added.
CommitMessage string
}
// Creates a new branch with a commit containing files and pushes it to the remote.
func (s *svc) CreateBranch(ctx context.Context, req *CreateBranchRequest) error {
cloneOpts := &git.CloneOptions{
Depth: 1,
URL: fmt.Sprintf("https://github.com/%s/%s", req.Ref.RepoOwner, req.Ref.RepoName),
ReferenceName: plumbing.NewBranchReferenceName(req.Ref.Ref),
Auth: s.rawAuth,
}
repo, err := git.CloneContext(ctx, memory.NewStorage(), memfs.New(), cloneOpts)
if err != nil {
return err
}
wt, err := repo.Worktree()
if err != nil {
return err
}
checkoutOpts := &git.CheckoutOptions{
Branch: plumbing.NewBranchReferenceName(req.BranchName),
Create: true,
}
if err := wt.Checkout(checkoutOpts); err != nil {
return err
}
for filename, contents := range req.Files {
fh, err := wt.Filesystem.Create(filename)
if err != nil {
return err
}
if _, err := io.Copy(fh, contents); err != nil {
return err
}
}
if err := wt.AddGlob("."); err != nil {
return err
}
if _, err := wt.Commit(req.CommitMessage, &git.CommitOptions{}); err != nil {
return err
}
pushOpts := &git.PushOptions{Auth: s.rawAuth}
if err := repo.PushContext(ctx, pushOpts); err != nil {
return err
}
return nil
}
func newService(config *githubv1.Config) Client {
token := config.GetAccessToken()
tokenSource := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: token},
)
httpClient := oauth2.NewClient(context.Background(), tokenSource)
rest := githubv3.NewClient(httpClient)
return &svc{
graphQL: githubv4.NewClient(httpClient),
rest: v3client{
Repositories: rest.Repositories,
PullRequests: rest.PullRequests,
Issues: rest.Issues,
Users: rest.Users,
Organizations: rest.Organizations,
},
rawAuth: &gittransport.BasicAuth{
Username: "token",
Password: token,
},
}
}
func (s *svc) GetFile(ctx context.Context, ref *RemoteRef, path string) (*File, error) {
q := &getFileQuery{}
params := map[string]interface{}{
"owner": githubv4.String(ref.RepoOwner),
"name": githubv4.String(ref.RepoName),
"path": githubv4.String(path),
"ref": githubv4.String(ref.Ref),
"refPath": githubv4.String(fmt.Sprintf("%s:%s", ref.Ref, path)),
}
err := s.graphQL.Query(ctx, q, params)
if err != nil {
return nil, err
}
switch {
case q.Repository.Ref.Commit.ID == nil:
return nil, errors.New("ref not found")
case q.Repository.Object.Blob.ID == nil:
return nil, errors.New("object not found")
case bool(q.Repository.Object.Blob.IsTruncated):
return nil, errors.New("object was too large and was truncated by the API")
case bool(q.Repository.Object.Blob.IsBinary):
return nil, errors.New("object is a binary object and cannot be retrieved directly via the API")
}
f := &File{
Path: path,
Contents: ioutil.NopCloser(strings.NewReader(string(q.Repository.Object.Blob.Text))),
SHA: string(q.Repository.Object.Blob.OID),
}
if len(q.Repository.Ref.Commit.History.Nodes) > 0 {
f.LastModifiedTime = q.Repository.Ref.Commit.History.Nodes[0].CommittedDate.Time
f.LastModifiedSHA = string(q.Repository.Ref.Commit.History.Nodes[0].OID)
}
return f, nil
}
func (s *svc) CompareCommits(ctx context.Context, ref *RemoteRef, compareSHA string) (*scgithubv1.CommitComparison, error) {
comp, _, err := s.rest.Repositories.CompareCommits(ctx, ref.RepoOwner, ref.RepoName, compareSHA, ref.Ref)
if err != nil {
return nil, fmt.Errorf("Could not get compare status for %s and %s. %+v", ref.Ref, compareSHA, err)
}
status, ok := scgithubv1.CommitCompareStatus_value[strings.ToUpper(comp.GetStatus())]
if !ok {
return nil, fmt.Errorf("unknown status %s", comp.GetStatus())
}
return &scgithubv1.CommitComparison{
Status: scgithubv1.CommitCompareStatus(status),
}, nil
}
type Commit struct {
Files []*githubv3.CommitFile
Message string
Author *githubv3.User
ParentRef string
}
func (s *svc) GetCommit(ctx context.Context, ref *RemoteRef) (*Commit, error) {
commit, _, err := s.rest.Repositories.GetCommit(ctx, ref.RepoOwner, ref.RepoName, ref.Ref)
if err != nil {
return nil, err
}
// Currently we are using the Author (Github) rather than commit Author (Git)
retCommit := &Commit{
Files: commit.Files,
Message: commit.GetCommit().GetMessage(),
Author: commit.GetAuthor(),
}
if commit.Parents != nil && len(commit.Parents) > 0 {
retCommit.ParentRef = commit.Parents[0].GetSHA()
}
return retCommit, nil
}
func (s *svc) GetRepository(ctx context.Context, repo *RemoteRef) (*Repository, error) {
q := &getRepositoryQuery{}
params := map[string]interface{}{
"owner": githubv4.String(repo.RepoOwner),
"name": githubv4.String(repo.RepoName),
}
err := s.graphQL.Query(ctx, q, params)
if err != nil {
return nil, err
}
r := &Repository{
Name: repo.RepoName,
Owner: repo.RepoOwner,
DefaultBranch: string(q.Repository.DefaultBranchRef.Name),
}
return r, nil
}
| 1 | 11,548 | Can you add docstring to this? Also, this might be a nit but I think `CompareCommits` or `CommitRange` might be a better name. What do you think? | lyft-clutch | go |
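A sketch of the docstring the reviewer asked for, keeping the method name from the patch above; the wording is illustrative only and reuses the svc, RemoteRef and go-github types defined in the quoted file:

// ListCommitsViaComparison returns the commits that are reachable from ref.Ref
// but not from compareSHA, as reported by GitHub's compare API. (The reviewer
// suggested CompareCommits or CommitRange as possible alternative names.)
func (s *svc) ListCommitsViaComparison(ctx context.Context, ref *RemoteRef, compareSHA string) ([]*githubv3.RepositoryCommit, error) {
	comp, _, err := s.rest.Repositories.CompareCommits(ctx, ref.RepoOwner, ref.RepoName, compareSHA, ref.Ref)
	if err != nil {
		return nil, fmt.Errorf("could not get commits from comparison for %s and %s. %+v", ref.Ref, compareSHA, err)
	}
	return comp.Commits, nil
}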
@@ -33,6 +33,11 @@ import (
func (current *PullSubscription) Validate(ctx context.Context) *apis.FieldError {
errs := current.Spec.Validate(ctx).ViaField("spec")
+
+ if apis.IsInUpdate(ctx) {
+ original := apis.GetBaseline(ctx).(*PullSubscription)
+ errs = errs.Also(current.CheckImmutableFields(ctx, original))
+ }
return duck.ValidateAutoscalingAnnotations(ctx, current.Annotations, errs)
}
| 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"context"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/knative-gcp/pkg/apis/duck"
"github.com/google/knative-gcp/pkg/apis/intevents"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
func (current *PullSubscription) Validate(ctx context.Context) *apis.FieldError {
errs := current.Spec.Validate(ctx).ViaField("spec")
return duck.ValidateAutoscalingAnnotations(ctx, current.Annotations, errs)
}
func (current *PullSubscriptionSpec) Validate(ctx context.Context) *apis.FieldError {
var errs *apis.FieldError
// Topic [required]
if current.Topic == "" {
errs = errs.Also(apis.ErrMissingField("topic"))
}
// Sink [required]
if equality.Semantic.DeepEqual(current.Sink, duckv1.Destination{}) {
errs = errs.Also(apis.ErrMissingField("sink"))
} else if err := current.Sink.Validate(ctx); err != nil {
errs = errs.Also(err.ViaField("sink"))
}
// Transformer [optional]
if current.Transformer != nil && !equality.Semantic.DeepEqual(current.Transformer, &duckv1.Destination{}) {
if err := current.Transformer.Validate(ctx); err != nil {
errs = errs.Also(err.ViaField("transformer"))
}
}
if current.RetentionDuration != nil {
// If set, RetentionDuration cannot be longer than 7 days or shorter than 10 minutes.
rd, err := time.ParseDuration(*current.RetentionDuration)
if err != nil {
errs = errs.Also(apis.ErrInvalidValue(*current.RetentionDuration, "retentionDuration"))
} else if rd < intevents.MinRetentionDuration || rd > intevents.MaxRetentionDuration {
errs = errs.Also(apis.ErrOutOfBoundsValue(*current.RetentionDuration, intevents.MinRetentionDuration.String(), intevents.MaxRetentionDuration.String(), "retentionDuration"))
}
}
if current.AckDeadline != nil {
// If set, AckDeadline needs to parse to a valid duration.
ad, err := time.ParseDuration(*current.AckDeadline)
if err != nil {
errs = errs.Also(apis.ErrInvalidValue(*current.AckDeadline, "ackDeadline"))
} else if ad < intevents.MinAckDeadline || ad > intevents.MaxAckDeadline {
errs = errs.Also(apis.ErrOutOfBoundsValue(*current.AckDeadline, intevents.MinAckDeadline.String(), intevents.MaxAckDeadline.String(), "ackDeadline"))
}
}
// Mode [optional]
switch current.Mode {
case "", ModeCloudEventsBinary, ModeCloudEventsStructured, ModePushCompatible:
// valid
default:
errs = errs.Also(apis.ErrInvalidValue(current.Mode, "mode"))
}
if current.Secret != nil {
if !equality.Semantic.DeepEqual(current.Secret, &corev1.SecretKeySelector{}) {
err := validateSecret(current.Secret)
if err != nil {
errs = errs.Also(err.ViaField("secret"))
}
}
}
return errs
}
// TODO move this to a common place.
func validateSecret(secret *corev1.SecretKeySelector) *apis.FieldError {
var errs *apis.FieldError
if secret.Name == "" {
errs = errs.Also(apis.ErrMissingField("name"))
}
if secret.Key == "" {
errs = errs.Also(apis.ErrMissingField("key"))
}
return errs
}
func (current *PullSubscription) CheckImmutableFields(ctx context.Context, original *PullSubscription) *apis.FieldError {
if original == nil {
return nil
}
var errs *apis.FieldError
// Modification of Topic, Secret, Mode, AckDeadline, RetainAckedMessages, RetentionDuration and Project is not allowed.
// Everything else is mutable.
if diff := cmp.Diff(original.Spec, current.Spec,
cmpopts.IgnoreFields(PullSubscriptionSpec{},
"Sink", "Transformer", "CloudEventOverrides")); diff != "" {
errs = errs.Also(&apis.FieldError{
Message: "Immutable fields changed (-old +new)",
Paths: []string{"spec"},
Details: diff,
})
}
// Modification of AutoscalingClassAnnotations is not allowed.
errs = duck.CheckImmutableAutoscalingClassAnnotations(&current.ObjectMeta, &original.ObjectMeta, errs)
// Modification of non-empty cluster name annotation is not allowed.
return duck.CheckImmutableClusterNameAnnotation(&current.ObjectMeta, &original.ObjectMeta, errs)
}
| 1 | 16,991 | were all these ones missed in the previous PR so you are adding them now? the sources v1 and some of this? Is that right? If so, can you update the PR title | google-knative-gcp | go |
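For context on the validation patch above: CheckImmutableFields only runs when the stored object has been attached to the context as the update baseline. A minimal sketch of that flow, assuming the standard knative.dev/pkg/apis helpers (WithinUpdate is the counterpart of the IsInUpdate/GetBaseline calls in the diff); the wrapper name validatePullSubscriptionUpdate is hypothetical:

// validatePullSubscriptionUpdate illustrates how the old object becomes the
// baseline that Validate compares against during an update.
func validatePullSubscriptionUpdate(ctx context.Context, oldPS, newPS *PullSubscription) *apis.FieldError {
	// Attach the stored (old) object as the baseline for this update; the
	// admission webhook normally does this before calling Validate.
	ctx = apis.WithinUpdate(ctx, oldPS)
	// Inside Validate, apis.IsInUpdate(ctx) is now true and apis.GetBaseline(ctx)
	// returns oldPS, so the CheckImmutableFields path added by the patch runs.
	return newPS.Validate(ctx)
}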
@@ -124,7 +124,10 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo
public static final int VERSION_72 = 8;
/** The version that recorded softDelCount */
public static final int VERSION_74 = 9;
- static final int VERSION_CURRENT = VERSION_74;
+ /** The version that recorded nextWriteDocValuesGen */
+ public static final int VERSION_77 = 10;
+
+ static final int VERSION_CURRENT = VERSION_77;
/** Used to name new segments. */
public long counter; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.FieldInfosFormat;
import org.apache.lucene.codecs.LiveDocsFormat;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.Version;
/**
* A collection of segmentInfo objects with methods for operating on those
* segments in relation to the file system.
* <p>
* The active segments in the index are stored in the segment info file,
* <tt>segments_N</tt>. There may be one or more <tt>segments_N</tt> files in
* the index; however, the one with the largest generation is the active one
* (when older segments_N files are present it's because they temporarily cannot
* be deleted, or a custom {@link IndexDeletionPolicy} is in
* use). This file lists each segment by name and has details about the codec
* and generation of deletes.
* </p>
* <p>
* Files:
* <ul>
* <li><tt>segments_N</tt>: Header, LuceneVersion, Version, NameCounter, SegCount, MinSegmentLuceneVersion, <SegName,
* SegID, SegCodec, DelGen, DeletionCount, FieldInfosGen, DocValuesGen,
* UpdatesFiles><sup>SegCount</sup>, CommitUserData, Footer
* </ul>
* Data types:
* <ul>
* <li>Header --> {@link CodecUtil#writeIndexHeader IndexHeader}</li>
* <li>LuceneVersion --> Which Lucene code {@link Version} was used for this commit, written as three {@link DataOutput#writeVInt vInt}: major, minor, bugfix
* <li>MinSegmentLuceneVersion --> Lucene code {@link Version} of the oldest segment, written as three {@link DataOutput#writeVInt vInt}: major, minor, bugfix; this is only
* written if there's at least one segment
* <li>NameCounter, SegCount, DeletionCount -->
* {@link DataOutput#writeInt Int32}</li>
* <li>Generation, Version, DelGen, Checksum, FieldInfosGen, DocValuesGen -->
* {@link DataOutput#writeLong Int64}</li>
* <li>SegID --> {@link DataOutput#writeByte Int8<sup>ID_LENGTH</sup>}</li>
* <li>SegName, SegCodec --> {@link DataOutput#writeString String}</li>
* <li>CommitUserData --> {@link DataOutput#writeMapOfStrings
* Map<String,String>}</li>
* <li>UpdatesFiles --> Map<{@link DataOutput#writeInt Int32},
* {@link DataOutput#writeSetOfStrings(Set) Set<String>}></li>
* <li>Footer --> {@link CodecUtil#writeFooter CodecFooter}</li>
* </ul>
* Field Descriptions:
* <ul>
* <li>Version counts how often the index has been changed by adding or deleting
* documents.</li>
* <li>NameCounter is used to generate names for new segment files.</li>
* <li>SegName is the name of the segment, and is used as the file name prefix
* for all of the files that compose the segment's index.</li>
* <li>DelGen is the generation count of the deletes file. If this is -1, there
* are no deletes. Anything above zero means there are deletes stored by
* {@link LiveDocsFormat}.</li>
* <li>DeletionCount records the number of deleted documents in this segment.</li>
* <li>SegCodec is the {@link Codec#getName() name} of the Codec that encoded
* this segment.</li>
* <li>SegID is the identifier of the Codec that encoded this segment. </li>
* <li>CommitUserData stores an optional user-supplied opaque
* Map<String,String> that was passed to
* {@link IndexWriter#setLiveCommitData(Iterable)}.</li>
* <li>FieldInfosGen is the generation count of the fieldInfos file. If this is
* -1, there are no updates to the fieldInfos in that segment. Anything above
* zero means there are updates to fieldInfos stored by {@link FieldInfosFormat}
* .</li>
* <li>DocValuesGen is the generation count of the updatable DocValues. If this
* is -1, there are no updates to DocValues in that segment. Anything above zero
* means there are updates to DocValues stored by {@link DocValuesFormat}.</li>
* <li>UpdatesFiles stores the set of files that were updated in that segment
* per field.</li>
* </ul>
*
* @lucene.experimental
*/
public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo> {
/** The version that added information about the Lucene version at the time when the index has been created. */
public static final int VERSION_70 = 7;
/** The version that updated segment name counter to be long instead of int. */
public static final int VERSION_72 = 8;
/** The version that recorded softDelCount */
public static final int VERSION_74 = 9;
static final int VERSION_CURRENT = VERSION_74;
/** Used to name new segments. */
public long counter;
/** Counts how often the index has been changed. */
public long version;
private long generation; // generation of the "segments_N" for the next commit
private long lastGeneration; // generation of the "segments_N" file we last successfully read
// or wrote; this is normally the same as generation except if
// there was an IOException that had interrupted a commit
/** Opaque Map<String, String> that user can specify during IndexWriter.commit */
public Map<String,String> userData = Collections.emptyMap();
private List<SegmentCommitInfo> segments = new ArrayList<>();
/**
* If non-null, information about loading segments_N files
* will be printed here. @see #setInfoStream.
*/
private static PrintStream infoStream = null;
/** Id for this commit; only written starting with Lucene 5.0 */
private byte[] id;
/** Which Lucene version wrote this commit. */
private Version luceneVersion;
/** Version of the oldest segment in the index, or null if there are no segments. */
private Version minSegmentLuceneVersion;
/** The Lucene version major that was used to create the index. */
private final int indexCreatedVersionMajor;
/** Sole constructor.
* @param indexCreatedVersionMajor the Lucene version major at index creation time, or 6 if the index was created before 7.0 */
public SegmentInfos(int indexCreatedVersionMajor) {
if (indexCreatedVersionMajor > Version.LATEST.major) {
throw new IllegalArgumentException("indexCreatedVersionMajor is in the future: " + indexCreatedVersionMajor);
}
if (indexCreatedVersionMajor < 6) {
throw new IllegalArgumentException("indexCreatedVersionMajor must be >= 6, got: " + indexCreatedVersionMajor);
}
this.indexCreatedVersionMajor = indexCreatedVersionMajor;
}
/** Returns {@link SegmentCommitInfo} at the provided
* index. */
public SegmentCommitInfo info(int i) {
return segments.get(i);
}
/**
* Get the generation of the most recent commit to the
* list of index files (N in the segments_N file).
*
* @param files -- array of file names to check
*/
public static long getLastCommitGeneration(String[] files) {
long max = -1;
for (String file : files) {
if (file.startsWith(IndexFileNames.SEGMENTS) && !file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
long gen = generationFromSegmentsFileName(file);
if (gen > max) {
max = gen;
}
}
}
return max;
}
/**
* Get the generation of the most recent commit to the
* index in this directory (N in the segments_N file).
*
* @param directory -- directory to search for the latest segments_N file
*/
public static long getLastCommitGeneration(Directory directory) throws IOException {
return getLastCommitGeneration(directory.listAll());
}
/**
* Get the filename of the segments_N file for the most
* recent commit in the list of index files.
*
* @param files -- array of file names to check
*/
public static String getLastCommitSegmentsFileName(String[] files) {
return IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
getLastCommitGeneration(files));
}
/**
* Get the filename of the segments_N file for the most
* recent commit to the index in this Directory.
*
* @param directory -- directory to search for the latest segments_N file
*/
public static String getLastCommitSegmentsFileName(Directory directory) throws IOException {
return IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
getLastCommitGeneration(directory));
}
/**
* Get the segments_N filename in use by this segment infos.
*/
public String getSegmentsFileName() {
return IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
lastGeneration);
}
/**
* Parse the generation off the segments file name and
* return it.
*/
public static long generationFromSegmentsFileName(String fileName) {
if (fileName.equals(IndexFileNames.SEGMENTS)) {
return 0;
} else if (fileName.startsWith(IndexFileNames.SEGMENTS)) {
return Long.parseLong(fileName.substring(1+IndexFileNames.SEGMENTS.length()),
Character.MAX_RADIX);
} else {
throw new IllegalArgumentException("fileName \"" + fileName + "\" is not a segments file");
}
}
/** return generation of the next pending_segments_N that will be written */
private long getNextPendingGeneration() {
if (generation == -1) {
return 1;
} else {
return generation+1;
}
}
/** Since Lucene 5.0, every commit (segments_N) writes a unique id. This will
* return that id */
public byte[] getId() {
return id.clone();
}
/**
* Read a particular segmentFileName. Note that this may
* throw an IOException if a commit is in process.
*
* @param directory -- directory containing the segments file
* @param segmentFileName -- segment file to load
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static final SegmentInfos readCommit(Directory directory, String segmentFileName) throws IOException {
long generation = generationFromSegmentsFileName(segmentFileName);
//System.out.println(Thread.currentThread() + ": SegmentInfos.readCommit " + segmentFileName);
try (ChecksumIndexInput input = directory.openChecksumInput(segmentFileName, IOContext.READ)) {
try {
return readCommit(directory, input, generation);
} catch (EOFException | NoSuchFileException | FileNotFoundException e) {
throw new CorruptIndexException("Unexpected file read error while reading index.", input, e);
}
}
}
/** Read the commit from the provided {@link ChecksumIndexInput}. */
public static final SegmentInfos readCommit(Directory directory, ChecksumIndexInput input, long generation) throws IOException {
// NOTE: as long as we want to throw indexformattooold (vs corruptindexexception), we need
// to read the magic ourselves.
int magic = input.readInt();
if (magic != CodecUtil.CODEC_MAGIC) {
throw new IndexFormatTooOldException(input, magic, CodecUtil.CODEC_MAGIC, CodecUtil.CODEC_MAGIC);
}
int format = CodecUtil.checkHeaderNoMagic(input, "segments", VERSION_70, VERSION_CURRENT);
byte id[] = new byte[StringHelper.ID_LENGTH];
input.readBytes(id, 0, id.length);
CodecUtil.checkIndexHeaderSuffix(input, Long.toString(generation, Character.MAX_RADIX));
Version luceneVersion = Version.fromBits(input.readVInt(), input.readVInt(), input.readVInt());
int indexCreatedVersion = input.readVInt();
if (luceneVersion.major < indexCreatedVersion) {
throw new CorruptIndexException("Creation version [" + indexCreatedVersion
+ ".x] can't be greater than the version that wrote the segment infos: [" + luceneVersion + "]" , input);
}
if (indexCreatedVersion < Version.LATEST.major - 1) {
throw new IndexFormatTooOldException(input, "This index was initially created with Lucene "
+ indexCreatedVersion + ".x while the current version is " + Version.LATEST
+ " and Lucene only supports reading the current and previous major versions.");
}
SegmentInfos infos = new SegmentInfos(indexCreatedVersion);
infos.id = id;
infos.generation = generation;
infos.lastGeneration = generation;
infos.luceneVersion = luceneVersion;
infos.version = input.readLong();
//System.out.println("READ sis version=" + infos.version);
if (format > VERSION_70) {
infos.counter = input.readVLong();
} else {
infos.counter = input.readInt();
}
int numSegments = input.readInt();
if (numSegments < 0) {
throw new CorruptIndexException("invalid segment count: " + numSegments, input);
}
if (numSegments > 0) {
infos.minSegmentLuceneVersion = Version.fromBits(input.readVInt(), input.readVInt(), input.readVInt());
} else {
// else leave as null: no segments
}
long totalDocs = 0;
for (int seg = 0; seg < numSegments; seg++) {
String segName = input.readString();
byte[] segmentID = new byte[StringHelper.ID_LENGTH];
input.readBytes(segmentID, 0, segmentID.length);
Codec codec = readCodec(input);
SegmentInfo info = codec.segmentInfoFormat().read(directory, segName, segmentID, IOContext.READ);
info.setCodec(codec);
totalDocs += info.maxDoc();
long delGen = input.readLong();
int delCount = input.readInt();
if (delCount < 0 || delCount > info.maxDoc()) {
throw new CorruptIndexException("invalid deletion count: " + delCount + " vs maxDoc=" + info.maxDoc(), input);
}
long fieldInfosGen = input.readLong();
long dvGen = input.readLong();
int softDelCount = format > VERSION_72 ? input.readInt() : 0;
if (softDelCount < 0 || softDelCount > info.maxDoc()) {
throw new CorruptIndexException("invalid deletion count: " + softDelCount + " vs maxDoc=" + info.maxDoc(), input);
}
if (softDelCount + delCount > info.maxDoc()) {
throw new CorruptIndexException("invalid deletion count: " + softDelCount + delCount + " vs maxDoc=" + info.maxDoc(), input);
}
SegmentCommitInfo siPerCommit = new SegmentCommitInfo(info, delCount, softDelCount, delGen, fieldInfosGen, dvGen);
siPerCommit.setFieldInfosFiles(input.readSetOfStrings());
final Map<Integer,Set<String>> dvUpdateFiles;
final int numDVFields = input.readInt();
if (numDVFields == 0) {
dvUpdateFiles = Collections.emptyMap();
} else {
Map<Integer,Set<String>> map = new HashMap<>(numDVFields);
for (int i = 0; i < numDVFields; i++) {
map.put(input.readInt(), input.readSetOfStrings());
}
dvUpdateFiles = Collections.unmodifiableMap(map);
}
siPerCommit.setDocValuesUpdatesFiles(dvUpdateFiles);
infos.add(siPerCommit);
Version segmentVersion = info.getVersion();
if (segmentVersion.onOrAfter(infos.minSegmentLuceneVersion) == false) {
throw new CorruptIndexException("segments file recorded minSegmentLuceneVersion=" + infos.minSegmentLuceneVersion + " but segment=" + info + " has older version=" + segmentVersion, input);
}
if (infos.indexCreatedVersionMajor >= 7 && segmentVersion.major < infos.indexCreatedVersionMajor) {
throw new CorruptIndexException("segments file recorded indexCreatedVersionMajor=" + infos.indexCreatedVersionMajor + " but segment=" + info + " has older version=" + segmentVersion, input);
}
if (infos.indexCreatedVersionMajor >= 7 && info.getMinVersion() == null) {
throw new CorruptIndexException("segments infos must record minVersion with indexCreatedVersionMajor=" + infos.indexCreatedVersionMajor, input);
}
}
infos.userData = input.readMapOfStrings();
CodecUtil.checkFooter(input);
// LUCENE-6299: check we are in bounds
if (totalDocs > IndexWriter.getActualMaxDocs()) {
throw new CorruptIndexException("Too many documents: an index cannot exceed " + IndexWriter.getActualMaxDocs() + " but readers have total maxDoc=" + totalDocs, input);
}
return infos;
}
private static Codec readCodec(DataInput input) throws IOException {
final String name = input.readString();
try {
return Codec.forName(name);
} catch (IllegalArgumentException e) {
// maybe it's an old default codec that moved
if (name.startsWith("Lucene")) {
throw new IllegalArgumentException("Could not load codec '" + name + "'. Did you forget to add lucene-backward-codecs.jar?", e);
}
throw e;
}
}
/** Find the latest commit ({@code segments_N file}) and
* load all {@link SegmentCommitInfo}s. */
public static final SegmentInfos readLatestCommit(Directory directory) throws IOException {
return new FindSegmentsFile<SegmentInfos>(directory) {
@Override
protected SegmentInfos doBody(String segmentFileName) throws IOException {
return readCommit(directory, segmentFileName);
}
}.run();
}
// Only true after prepareCommit has been called and
// before finishCommit is called
boolean pendingCommit;
private void write(Directory directory) throws IOException {
long nextGeneration = getNextPendingGeneration();
String segmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS,
"",
nextGeneration);
// Always advance the generation on write:
generation = nextGeneration;
IndexOutput segnOutput = null;
boolean success = false;
try {
segnOutput = directory.createOutput(segmentFileName, IOContext.DEFAULT);
write(directory, segnOutput);
segnOutput.close();
directory.sync(Collections.singleton(segmentFileName));
success = true;
} finally {
if (success) {
pendingCommit = true;
} else {
// We hit an exception above; try to close the file
// but suppress any exception:
IOUtils.closeWhileHandlingException(segnOutput);
// Try not to leave a truncated segments_N file in
// the index:
IOUtils.deleteFilesIgnoringExceptions(directory, segmentFileName);
}
}
}
/** Write ourselves to the provided {@link IndexOutput} */
public void write(Directory directory, IndexOutput out) throws IOException {
CodecUtil.writeIndexHeader(out, "segments", VERSION_CURRENT,
StringHelper.randomId(), Long.toString(generation, Character.MAX_RADIX));
out.writeVInt(Version.LATEST.major);
out.writeVInt(Version.LATEST.minor);
out.writeVInt(Version.LATEST.bugfix);
//System.out.println(Thread.currentThread().getName() + ": now write " + out.getName() + " with version=" + version);
out.writeVInt(indexCreatedVersionMajor);
out.writeLong(version);
out.writeVLong(counter); // write counter
out.writeInt(size());
if (size() > 0) {
Version minSegmentVersion = null;
// We do a separate loop up front so we can write the minSegmentVersion before
// any SegmentInfo; this makes it cleaner to throw IndexFormatTooOldExc at read time:
for (SegmentCommitInfo siPerCommit : this) {
Version segmentVersion = siPerCommit.info.getVersion();
if (minSegmentVersion == null || segmentVersion.onOrAfter(minSegmentVersion) == false) {
minSegmentVersion = segmentVersion;
}
}
out.writeVInt(minSegmentVersion.major);
out.writeVInt(minSegmentVersion.minor);
out.writeVInt(minSegmentVersion.bugfix);
}
// write infos
for (SegmentCommitInfo siPerCommit : this) {
SegmentInfo si = siPerCommit.info;
if (indexCreatedVersionMajor >= 7 && si.minVersion == null) {
throw new IllegalStateException("Segments must record minVersion if they have been created on or after Lucene 7: " + si);
}
out.writeString(si.name);
byte segmentID[] = si.getId();
if (segmentID.length != StringHelper.ID_LENGTH) {
throw new IllegalStateException("cannot write segment: invalid id segment=" + si.name + "id=" + StringHelper.idToString(segmentID));
}
out.writeBytes(segmentID, segmentID.length);
out.writeString(si.getCodec().getName());
out.writeLong(siPerCommit.getDelGen());
int delCount = siPerCommit.getDelCount();
if (delCount < 0 || delCount > si.maxDoc()) {
throw new IllegalStateException("cannot write segment: invalid maxDoc segment=" + si.name + " maxDoc=" + si.maxDoc() + " delCount=" + delCount);
}
out.writeInt(delCount);
out.writeLong(siPerCommit.getFieldInfosGen());
out.writeLong(siPerCommit.getDocValuesGen());
int softDelCount = siPerCommit.getSoftDelCount();
if (softDelCount < 0 || softDelCount > si.maxDoc()) {
throw new IllegalStateException("cannot write segment: invalid maxDoc segment=" + si.name + " maxDoc=" + si.maxDoc() + " softDelCount=" + softDelCount);
}
out.writeInt(softDelCount);
out.writeSetOfStrings(siPerCommit.getFieldInfosFiles());
final Map<Integer,Set<String>> dvUpdatesFiles = siPerCommit.getDocValuesUpdatesFiles();
out.writeInt(dvUpdatesFiles.size());
for (Entry<Integer,Set<String>> e : dvUpdatesFiles.entrySet()) {
out.writeInt(e.getKey());
out.writeSetOfStrings(e.getValue());
}
}
out.writeMapOfStrings(userData);
CodecUtil.writeFooter(out);
}
/**
* Returns a copy of this instance, also copying each
* SegmentInfo.
*/
@Override
public SegmentInfos clone() {
try {
final SegmentInfos sis = (SegmentInfos) super.clone();
// deep clone, first recreate all collections:
sis.segments = new ArrayList<>(size());
for(final SegmentCommitInfo info : this) {
assert info.info.getCodec() != null;
// don't directly access segments, use add method!!!
sis.add(info.clone());
}
sis.userData = new HashMap<>(userData);
return sis;
} catch (CloneNotSupportedException e) {
throw new RuntimeException("should not happen", e);
}
}
/**
* version number when this SegmentInfos was generated.
*/
public long getVersion() {
return version;
}
/** Returns current generation. */
public long getGeneration() {
return generation;
}
/** Returns last successfully read or written generation. */
public long getLastGeneration() {
return lastGeneration;
}
/** If non-null, information about retries when loading
* the segments file will be printed to this.
*/
public static void setInfoStream(PrintStream infoStream) {
SegmentInfos.infoStream = infoStream;
}
/**
* Returns {@code infoStream}.
*
* @see #setInfoStream
*/
public static PrintStream getInfoStream() {
return infoStream;
}
/**
* Prints the given message to the infoStream. Note, this method does not
* check for null infoStream. It assumes this check has been performed by the
* caller, which is recommended to avoid the (usually) expensive message
* creation.
*/
private static void message(String message) {
infoStream.println("SIS [" + Thread.currentThread().getName() + "]: " + message);
}
/**
* Utility class for executing code that needs to do
* something with the current segments file. This is
* necessary with lock-less commits because from the time
* you locate the current segments file name, until you
* actually open it, read its contents, or check modified
* time, etc., it could have been deleted due to a writer
* commit finishing.
*/
public abstract static class FindSegmentsFile<T> {
final Directory directory;
/** Sole constructor. */
public FindSegmentsFile(Directory directory) {
this.directory = directory;
}
/** Locate the most recent {@code segments} file and
* run {@link #doBody} on it. */
public T run() throws IOException {
return run(null);
}
/** Run {@link #doBody} on the provided commit. */
public T run(IndexCommit commit) throws IOException {
if (commit != null) {
if (directory != commit.getDirectory())
throw new IOException("the specified commit does not match the specified Directory");
return doBody(commit.getSegmentsFileName());
}
long lastGen = -1;
long gen = -1;
IOException exc = null;
// Loop until we succeed in calling doBody() without
// hitting an IOException. An IOException most likely
// means an IW deleted our commit while opening
// the time it took us to load the now-old infos files
// (and segments files). It's also possible it's a
// true error (corrupt index). To distinguish these,
// on each retry we must see "forward progress" on
// which generation we are trying to load. If we
// don't, then the original error is real and we throw
// it.
for (;;) {
lastGen = gen;
String files[] = directory.listAll();
String files2[] = directory.listAll();
Arrays.sort(files);
Arrays.sort(files2);
if (!Arrays.equals(files, files2)) {
// listAll() is weakly consistent, this means we hit "concurrent modification exception"
continue;
}
gen = getLastCommitGeneration(files);
if (infoStream != null) {
message("directory listing gen=" + gen);
}
if (gen == -1) {
throw new IndexNotFoundException("no segments* file found in " + directory + ": files: " + Arrays.toString(files));
} else if (gen > lastGen) {
String segmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen);
try {
T t = doBody(segmentFileName);
if (infoStream != null) {
message("success on " + segmentFileName);
}
return t;
} catch (IOException err) {
// Save the original root cause:
if (exc == null) {
exc = err;
}
if (infoStream != null) {
message("primary Exception on '" + segmentFileName + "': " + err + "'; will retry: gen = " + gen);
}
}
} else {
throw exc;
}
}
}
/**
* Subclass must implement this. The assumption is an
* IOException will be thrown if something goes wrong
* during the processing that could have been caused by
* a writer committing.
*/
protected abstract T doBody(String segmentFileName) throws IOException;
}
/** Carry over generation numbers from another SegmentInfos
*
* @lucene.internal */
public void updateGeneration(SegmentInfos other) {
lastGeneration = other.lastGeneration;
generation = other.generation;
}
// Carry over generation numbers, and version/counter, from another SegmentInfos
void updateGenerationVersionAndCounter(SegmentInfos other) {
updateGeneration(other);
this.version = other.version;
this.counter = other.counter;
}
/** Set the generation to be used for the next commit */
public void setNextWriteGeneration(long generation) {
if (generation < this.generation) {
throw new IllegalStateException("cannot decrease generation to " + generation + " from current generation " + this.generation);
}
this.generation = generation;
}
final void rollbackCommit(Directory dir) {
if (pendingCommit) {
pendingCommit = false;
// we try to clean up our pending_segments_N
// Must carefully compute fileName from "generation"
// since lastGeneration isn't incremented:
final String pending = IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS, "", generation);
// Suppress so we keep throwing the original exception
// in our caller
IOUtils.deleteFilesIgnoringExceptions(dir, pending);
}
}
/** Call this to start a commit. This writes the new
* segments file, but writes an invalid checksum at the
* end, so that it is not visible to readers. Once this
* is called you must call {@link #finishCommit} to complete
* the commit or {@link #rollbackCommit} to abort it.
* <p>
* Note: {@link #changed()} should be called prior to this
* method if changes have been made to this {@link SegmentInfos} instance
* </p>
**/
final void prepareCommit(Directory dir) throws IOException {
if (pendingCommit) {
throw new IllegalStateException("prepareCommit was already called");
}
dir.syncMetaData();
write(dir);
}
/** Returns all file names referenced by SegmentInfo.
* The returned collection is recomputed on each
* invocation. */
public Collection<String> files(boolean includeSegmentsFile) throws IOException {
HashSet<String> files = new HashSet<>();
if (includeSegmentsFile) {
final String segmentFileName = getSegmentsFileName();
if (segmentFileName != null) {
files.add(segmentFileName);
}
}
final int size = size();
for(int i=0;i<size;i++) {
final SegmentCommitInfo info = info(i);
files.addAll(info.files());
}
return files;
}
/** Returns the committed segments_N filename. */
final String finishCommit(Directory dir) throws IOException {
if (pendingCommit == false) {
throw new IllegalStateException("prepareCommit was not called");
}
boolean success = false;
final String dest;
try {
final String src = IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS, "", generation);
dest = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
dir.rename(src, dest);
dir.syncMetaData();
success = true;
} finally {
if (!success) {
// deletes pending_segments_N:
rollbackCommit(dir);
}
}
pendingCommit = false;
lastGeneration = generation;
return dest;
}
/** Writes and syncs to the Directory dir, taking care to
* remove the segments file on exception
* <p>
* Note: {@link #changed()} should be called prior to this
* method if changes have been made to this {@link SegmentInfos} instance
* </p>
**/
public final void commit(Directory dir) throws IOException {
prepareCommit(dir);
finishCommit(dir);
}
/** Returns readable description of this segment. */
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append(getSegmentsFileName()).append(": ");
final int count = size();
for(int i = 0; i < count; i++) {
if (i > 0) {
buffer.append(' ');
}
final SegmentCommitInfo info = info(i);
buffer.append(info.toString(0));
}
return buffer.toString();
}
/** Return {@code userData} saved with this commit.
*
* @see IndexWriter#commit()
*/
public Map<String,String> getUserData() {
return userData;
}
/** Sets the commit data. */
public void setUserData(Map<String,String> data, boolean doIncrementVersion) {
if (data == null) {
userData = Collections.<String,String>emptyMap();
} else {
userData = data;
}
if (doIncrementVersion) {
changed();
}
}
/** Replaces all segments in this instance, but keeps
* generation, version, counter so that future commits
* remain write once.
*/
void replace(SegmentInfos other) {
rollbackSegmentInfos(other.asList());
lastGeneration = other.lastGeneration;
}
/** Returns sum of all segment's maxDocs. Note that
* this does not include deletions */
public int totalMaxDoc() {
long count = 0;
for(SegmentCommitInfo info : this) {
count += info.info.maxDoc();
}
// we should never hit this, checks should happen elsewhere...
assert count <= IndexWriter.getActualMaxDocs();
return Math.toIntExact(count);
}
/** Call this before committing if changes have been made to the
* segments. */
public void changed() {
version++;
//System.out.println(Thread.currentThread().getName() + ": SIS.change to version=" + version);
//new Throwable().printStackTrace(System.out);
}
void setVersion(long newVersion) {
if (newVersion < version) {
throw new IllegalArgumentException("newVersion (=" + newVersion + ") cannot be less than current version (=" + version + ")");
}
//System.out.println(Thread.currentThread().getName() + ": SIS.setVersion change from " + version + " to " + newVersion);
version = newVersion;
}
/** applies all changes caused by committing a merge to this SegmentInfos */
void applyMergeChanges(MergePolicy.OneMerge merge, boolean dropSegment) {
if (indexCreatedVersionMajor >= 7 && merge.info.info.minVersion == null) {
throw new IllegalArgumentException("All segments must record the minVersion for indices created on or after Lucene 7");
}
final Set<SegmentCommitInfo> mergedAway = new HashSet<>(merge.segments);
boolean inserted = false;
int newSegIdx = 0;
for (int segIdx = 0, cnt = segments.size(); segIdx < cnt; segIdx++) {
assert segIdx >= newSegIdx;
final SegmentCommitInfo info = segments.get(segIdx);
if (mergedAway.contains(info)) {
if (!inserted && !dropSegment) {
segments.set(segIdx, merge.info);
inserted = true;
newSegIdx++;
}
} else {
segments.set(newSegIdx, info);
newSegIdx++;
}
}
// the rest of the segments in list are duplicates, so don't remove from map, only list!
segments.subList(newSegIdx, segments.size()).clear();
// Either we found place to insert segment, or, we did
// not, but only because all segments we merged became
// deleted while we are merging, in which case it should
// be the case that the new segment is also all deleted,
// we insert it at the beginning if it should not be dropped:
if (!inserted && !dropSegment) {
segments.add(0, merge.info);
}
}
List<SegmentCommitInfo> createBackupSegmentInfos() {
final List<SegmentCommitInfo> list = new ArrayList<>(size());
for(final SegmentCommitInfo info : this) {
assert info.info.getCodec() != null;
list.add(info.clone());
}
return list;
}
void rollbackSegmentInfos(List<SegmentCommitInfo> infos) {
this.clear();
this.addAll(infos);
}
/** Returns an <b>unmodifiable</b> {@link Iterator} of contained segments in order. */
// @Override (comment out until Java 6)
@Override
public Iterator<SegmentCommitInfo> iterator() {
return asList().iterator();
}
/** Returns all contained segments as an <b>unmodifiable</b> {@link List} view. */
public List<SegmentCommitInfo> asList() {
return Collections.unmodifiableList(segments);
}
/** Returns number of {@link SegmentCommitInfo}s. */
public int size() {
return segments.size();
}
/** Appends the provided {@link SegmentCommitInfo}. */
public void add(SegmentCommitInfo si) {
if (indexCreatedVersionMajor >= 7 && si.info.minVersion == null) {
throw new IllegalArgumentException("All segments must record the minVersion for indices created on or after Lucene 7");
}
segments.add(si);
}
/** Appends the provided {@link SegmentCommitInfo}s. */
public void addAll(Iterable<SegmentCommitInfo> sis) {
for (final SegmentCommitInfo si : sis) {
this.add(si);
}
}
/** Clear all {@link SegmentCommitInfo}s. */
public void clear() {
segments.clear();
}
/** Remove the provided {@link SegmentCommitInfo}.
*
* <p><b>WARNING</b>: O(N) cost */
public boolean remove(SegmentCommitInfo si) {
return segments.remove(si);
}
/** Remove the {@link SegmentCommitInfo} at the
* provided index.
*
* <p><b>WARNING</b>: O(N) cost */
void remove(int index) {
segments.remove(index);
}
/** Return true if the provided {@link
* SegmentCommitInfo} is contained.
*
* <p><b>WARNING</b>: O(N) cost */
boolean contains(SegmentCommitInfo si) {
return segments.contains(si);
}
/** Returns index of the provided {@link
* SegmentCommitInfo}.
*
* <p><b>WARNING</b>: O(N) cost */
int indexOf(SegmentCommitInfo si) {
return segments.indexOf(si);
}
/** Returns which Lucene {@link Version} wrote this commit, or null if the
* version this index was written with did not directly record the version. */
public Version getCommitLuceneVersion() {
return luceneVersion;
}
/** Returns the version of the oldest segment, or null if there are no segments. */
public Version getMinSegmentLuceneVersion() {
return minSegmentLuceneVersion;
}
/** Return the version major that was used to initially create the index.
* This version is set when the index is first created and then never
* changes. This information was added as of version 7.0 so older
* indices report 6 as a creation version. */
public int getIndexCreatedVersionMajor() {
return indexCreatedVersionMajor;
}
}
| 1 | 28,816 | hmm should this be `VERSION_81` instead? | apache-lucene-solr | java |
@@ -50,7 +50,9 @@ const (
pendingDeltasFlushThreshold = 128
// trieRebuildAccountChunkSize defines the number of accounts that would get read at a single chunk
// before added to the trie during trie construction
- trieRebuildAccountChunkSize = 512
+ trieRebuildAccountChunkSize = 16384
+ // trieRebuildCommitFrequency defines the number of accounts that would get added before we call evict to commit the changes and adjust the memory cache.
+ trieRebuildCommitFrequency = 65536
// trieAccumulatedChangesFlush defines the number of pending changes that would be applied to the merkle trie before
// we attempt to commit them to disk while writing a batch of rounds balances to disk.
trieAccumulatedChangesFlush = 256 | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package ledger
import (
"context"
"database/sql"
"encoding/hex"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"sync"
"time"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merkletrie"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
const (
// balancesFlushInterval defines how frequently we want to flush our balances to disk.
balancesFlushInterval = 5 * time.Second
// pendingDeltasFlushThreshold is the deltas count threshold above we flush the pending balances regardless of the flush interval.
pendingDeltasFlushThreshold = 128
// trieRebuildAccountChunkSize defines the number of accounts that would get read at a single chunk
// before added to the trie during trie construction
trieRebuildAccountChunkSize = 512
// trieAccumulatedChangesFlush defines the number of pending changes that would be applied to the merkle trie before
// we attempt to commit them to disk while writing a batch of rounds balances to disk.
trieAccumulatedChangesFlush = 256
)
// trieCachedNodesCount defines how many balances trie nodes we would like to keep around in memory.
// value was calibrated using BenchmarkCalibrateCacheNodeSize
var trieCachedNodesCount = 9000
// A modifiedAccount represents an account that has been modified since
// the persistent state stored in the account DB (i.e., in the range of
// rounds covered by the accountUpdates tracker).
type modifiedAccount struct {
// data stores the most recent AccountData for this modified
// account.
data basics.AccountData
// ndelta keeps track of how many times this account appears in
// accountUpdates.deltas. This is used to evict modifiedAccount
// entries when all changes to an account have been reflected in
// the account DB, and no outstanding modifications remain.
ndeltas int
}
type modifiedCreatable struct {
// Type of the creatable: app or asset
ctype basics.CreatableType
// Created if true, deleted if false
created bool
// creator of the app/asset
creator basics.Address
// Keeps track of how many times this app/asset appears in
// accountUpdates.creatableDeltas
ndeltas int
}
type accountUpdates struct {
// constant variables ( initialized on initialize, and never changed afterward )
// initAccounts specifies initial account values for database.
initAccounts map[basics.Address]basics.AccountData
// initProto specifies the initial consensus parameters.
initProto config.ConsensusParams
// dbDirectory is the directory where the ledger and block sql files reside, as well as the parent directory for the catchup files to be generated
dbDirectory string
// catchpointInterval is the configured interval at which the accountUpdates would generate catchpoint labels and catchpoint files.
catchpointInterval uint64
// archivalLedger determines whether the associated ledger was configured as archival ledger or not.
archivalLedger bool
// catchpointFileHistoryLength defines how many catchpoint files we want to store back.
// 0 means don't store any, -1 mean unlimited and positive number suggest the number of most recent catchpoint files.
catchpointFileHistoryLength int
// vacuumOnStartup controls whether the accounts database would get vacuumed on startup.
vacuumOnStartup bool
// dynamic variables
// Connection to the database.
dbs dbPair
// Prepared SQL statements for fast accounts DB lookups.
accountsq *accountsDbQueries
// dbRound is always exactly accountsRound(),
// cached to avoid SQL queries.
dbRound basics.Round
// deltas stores updates for every round after dbRound.
deltas []map[basics.Address]accountDelta
// accounts stores the most recent account state for every
// address that appears in deltas.
accounts map[basics.Address]modifiedAccount
// creatableDeltas stores creatable updates for every round after dbRound.
creatableDeltas []map[basics.CreatableIndex]modifiedCreatable
// creatables stores the most recent state for every creatable that
// appears in creatableDeltas
creatables map[basics.CreatableIndex]modifiedCreatable
// protos stores consensus parameters for dbRound and every
// round after it; i.e., protos is one longer than deltas.
protos []config.ConsensusParams
// totals stores the totals for dbRound and every round after it;
// i.e., totals is one longer than deltas.
roundTotals []AccountTotals
// roundDigest stores the digest of the block for every round starting with dbRound and every round after it.
roundDigest []crypto.Digest
// log copied from ledger
log logging.Logger
// lastFlushTime is the time we last flushed updates to
// the accounts DB (bumping dbRound).
lastFlushTime time.Time
// ledger is the source ledger, which is used to synchronize
// the rounds at which we need to flush the balances to disk
// in favor of the catchpoint to be generated.
ledger ledgerForTracker
// The Trie tracking the current account balances. Always matches the balances that were
// written to the database.
balancesTrie *merkletrie.Trie
// The last catchpoint label that was written to the database. Should always align with what's in the database.
// note that this is the last catchpoint *label* and not the catchpoint file.
lastCatchpointLabel string
// catchpointWriting helps synchronize the catchpoint file writing. When this channel is closed, no writing is going on;
// the channel is non-closed while writing the current accounts state to disk.
catchpointWriting chan struct{}
// catchpointSlowWriting suggests to the accounts writer that it should finish writing up the catchpoint file ASAP.
// When this channel is closed, the accounts writer would try to complete the writing as soon as possible;
// otherwise, it would take its time and perform periodic sleeps between chunk processing.
catchpointSlowWriting chan struct{}
// ctx is the context for the committing go-routine. It's also used as the "parent" of the catchpoint generation operation.
ctx context.Context
// ctxCancel is the canceling function for canceling the committing go-routine ( i.e. signaling the committing go-routine that it's time to abort )
ctxCancel context.CancelFunc
// deltasAccum stores the accumulated deltas for every round starting dbRound-1.
deltasAccum []int
// committedOffset is the offset at which we'd like to persist all the previous account information to disk.
committedOffset chan deferedCommit
// accountsMu is the synchronization mutex for accessing the various non-static variables.
accountsMu deadlock.RWMutex
// accountsWriting provides synchronization around the background writing of account balances.
accountsWriting sync.WaitGroup
// commitSyncerClosed is the blocking channel for synchronizing closing the commitSyncer goroutine. Once it's closed, the
// commitSyncer can be assumed to have aborted.
commitSyncerClosed chan struct{}
}
type deferedCommit struct {
offset uint64
dbRound basics.Round
lookback basics.Round
}
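// As a rough usage sketch ( the actual call sites live elsewhere in the ledger code, and error handling
// is omitted here ), the tracker is driven along the lines of:
//
//	var au accountUpdates
//	au.initialize(cfg, dbPathPrefix, genesisProto, genesisAccounts)
//	err := au.loadFromDisk(l)       // 2nd level initialization
//	au.newBlock(blk, delta)         // called for every new block
//	au.committedUpTo(latestRound)   // allows flushing rounds up to latestRound-lookback
//	au.close()                      // waits for pending writes and stops the commitSyncer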
// initialize initializes the accountUpdates structure
func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, genesisProto config.ConsensusParams, genesisAccounts map[basics.Address]basics.AccountData) {
au.initProto = genesisProto
au.initAccounts = genesisAccounts
au.dbDirectory = filepath.Dir(dbPathPrefix)
au.archivalLedger = cfg.Archival
au.catchpointInterval = cfg.CatchpointInterval
au.catchpointFileHistoryLength = cfg.CatchpointFileHistoryLength
if cfg.CatchpointFileHistoryLength < -1 {
au.catchpointFileHistoryLength = -1
}
au.vacuumOnStartup = cfg.OptimizeAccountsDatabaseOnStartup
// initialize the commitSyncerClosed with a closed channel ( since the commitSyncer go-routine is not active )
au.commitSyncerClosed = make(chan struct{})
close(au.commitSyncerClosed)
}
// loadFromDisk is the 2nd level initialization, and is required before the accountUpdates becomes functional.
// The close function is expected to be called in pair with loadFromDisk.
func (au *accountUpdates) loadFromDisk(l ledgerForTracker) error {
au.accountsMu.Lock()
defer au.accountsMu.Unlock()
var writingCatchpointRound uint64
lastBalancesRound, lastestBlockRound, err := au.initializeFromDisk(l)
if err != nil {
return err
}
var writingCatchpointDigest crypto.Digest
writingCatchpointRound, _, err = au.accountsq.readCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint)
if err != nil {
return err
}
writingCatchpointDigest, err = au.initializeCaches(lastBalancesRound, lastestBlockRound, basics.Round(writingCatchpointRound))
if err != nil {
return err
}
if writingCatchpointRound != 0 && au.catchpointInterval != 0 {
au.generateCatchpoint(basics.Round(writingCatchpointRound), au.lastCatchpointLabel, writingCatchpointDigest, time.Duration(0))
}
return nil
}
// waitAccountsWriting waits for all the pending ( or current ) account writing to be completed.
func (au *accountUpdates) waitAccountsWriting() {
au.accountsWriting.Wait()
}
// close closes the accountUpdates, waiting for all the child go-routines to complete
func (au *accountUpdates) close() {
if au.ctxCancel != nil {
au.ctxCancel()
}
au.waitAccountsWriting()
// this would block until the commitSyncerClosed channel get closed.
<-au.commitSyncerClosed
}
// Lookup returns the account data for a given address at a given round. The withRewards flag indicates whether the
// rewards should be added to the AccountData before returning. Note that the function doesn't update the account with the rewards,
// even though it could return the AccountData which represents the "rewarded" account data.
func (au *accountUpdates) Lookup(rnd basics.Round, addr basics.Address, withRewards bool) (data basics.AccountData, err error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
return au.lookupImpl(rnd, addr, withRewards)
}
// ListAssets lists the assets by their asset index, limiting to the first maxResults
func (au *accountUpdates) ListAssets(maxAssetIdx basics.AssetIndex, maxResults uint64) ([]basics.CreatableLocator, error) {
return au.listCreatables(basics.CreatableIndex(maxAssetIdx), maxResults, basics.AssetCreatable)
}
// ListApplications lists the application by their app index, limiting to the first maxResults
func (au *accountUpdates) ListApplications(maxAppIdx basics.AppIndex, maxResults uint64) ([]basics.CreatableLocator, error) {
return au.listCreatables(basics.CreatableIndex(maxAppIdx), maxResults, basics.AppCreatable)
}
// listCreatables lists the application/asset by their app/asset index, limiting to the first maxResults
func (au *accountUpdates) listCreatables(maxCreatableIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) ([]basics.CreatableLocator, error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
// Sort indices for creatables that have been created/deleted. If this
// turns out to be too inefficient, we could keep around a heap of
// created/deleted asset indices in memory.
keys := make([]basics.CreatableIndex, 0, len(au.creatables))
for cidx, delta := range au.creatables {
if delta.ctype != ctype {
continue
}
if cidx <= maxCreatableIdx {
keys = append(keys, cidx)
}
}
sort.Slice(keys, func(i, j int) bool { return keys[i] > keys[j] })
// Check for creatables that haven't been synced to disk yet.
var unsyncedCreatables []basics.CreatableLocator
deletedCreatables := make(map[basics.CreatableIndex]bool)
for _, cidx := range keys {
delta := au.creatables[cidx]
if delta.created {
// Created but only exists in memory
unsyncedCreatables = append(unsyncedCreatables, basics.CreatableLocator{
Type: delta.ctype,
Index: cidx,
Creator: delta.creator,
})
} else {
// Mark deleted creatables for exclusion from the results set
deletedCreatables[cidx] = true
}
}
// Check in-memory created creatables, which will always be newer than anything
// in the database
var res []basics.CreatableLocator
for _, loc := range unsyncedCreatables {
if uint64(len(res)) == maxResults {
return res, nil
}
res = append(res, loc)
}
// Fetch up to maxResults - len(res) + len(deletedCreatables) from the database,
// so we have enough extras in case creatables were deleted
numToFetch := maxResults - uint64(len(res)) + uint64(len(deletedCreatables))
dbResults, err := au.accountsq.listCreatables(maxCreatableIdx, numToFetch, ctype)
if err != nil {
return nil, err
}
// Now we merge the database results with the in-memory results
for _, loc := range dbResults {
// Check if we have enough results
if uint64(len(res)) == maxResults {
return res, nil
}
// Creatable was deleted
if _, ok := deletedCreatables[loc.Index]; ok {
continue
}
// We're OK to include this result
res = append(res, loc)
}
return res, nil
}
// GetLastCatchpointLabel retrieves the last catchpoint label that was stored to the database.
func (au *accountUpdates) GetLastCatchpointLabel() string {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
return au.lastCatchpointLabel
}
// GetCreatorForRound returns the creator for a given asset/app index at a given round
func (au *accountUpdates) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
return au.getCreatorForRoundImpl(rnd, cidx, ctype)
}
// committedUpTo enqueues committing the balances for round committedRound-lookback.
// The deferred committing is done so that we could calculate the historical balances lookback rounds back.
// Since we don't want to hold up the tracker's mutex for too long, we'll defer the database persistence of this
// operation to a syncer goroutine. The one caveat is that when storing a catchpoint round, we would want to
// wait until the catchpoint creation is done, so that the persistence of the catchpoint file would have an
// uninterrupted view of the balances at a given point of time.
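// For example, with illustrative numbers: assume MaxBalLookback = 320, catchpointInterval = 10000,
// dbRound = 9675 and committedRound = 10320. newBase starts as 10320-320 = 10000; the next catchpoint
// round strictly after dbRound+lookback = 9995 is 10000, which is below newBase+lookback = 10320, so
// newBase is pulled back to 10000-320 = 9680. The resulting offset is 5, and since offset+lookback+dbRound =
// 5+320+9675 = 10000 is a multiple of the catchpoint interval, this commit is treated as a catchpoint round.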
func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound basics.Round) {
var isCatchpointRound, hasMultipleIntermediateCatchpoint bool
var offset uint64
var dc deferedCommit
au.accountsMu.RLock()
defer func() {
au.accountsMu.RUnlock()
if dc.offset != 0 {
au.committedOffset <- dc
}
}()
retRound = basics.Round(0)
var pendingDeltas int
lookback := basics.Round(au.protos[len(au.protos)-1].MaxBalLookback)
if committedRound < lookback {
return
}
retRound = au.dbRound
newBase := committedRound - lookback
if newBase <= au.dbRound {
// Already forgotten
return
}
if newBase > au.dbRound+basics.Round(len(au.deltas)) {
au.log.Panicf("committedUpTo: block %d too far in the future, lookback %d, dbRound %d, deltas %d", committedRound, lookback, au.dbRound, len(au.deltas))
}
hasIntermediateCatchpoint := false
hasMultipleIntermediateCatchpoint = false
// check if there was a catchpoint between au.dbRound+lookback and newBase+lookback
if au.catchpointInterval > 0 {
nextCatchpointRound := ((uint64(au.dbRound+lookback) + au.catchpointInterval) / au.catchpointInterval) * au.catchpointInterval
if nextCatchpointRound < uint64(newBase+lookback) {
mostRecentCatchpointRound := (uint64(committedRound) / au.catchpointInterval) * au.catchpointInterval
newBase = basics.Round(nextCatchpointRound) - lookback
if mostRecentCatchpointRound > nextCatchpointRound {
hasMultipleIntermediateCatchpoint = true
// skip if there is more than one catchpoint in queue
newBase = basics.Round(mostRecentCatchpointRound) - lookback
}
hasIntermediateCatchpoint = true
}
}
// if we're still writing the previous balances, we can't move forward yet.
select {
case <-au.catchpointWriting:
// the channel catchpointWriting is currently closed, meaning that we're currently not writing any
// catchpoint file. At this point, we should attempt to enqueue further tasks as usual.
default:
// if we hit this path, it means that the channel is currently non-closed, which means that we're still writing a catchpoint.
// see if we're writing a catchpoint in that range.
if hasIntermediateCatchpoint {
// check if we're already attempting to perform fast-writing.
select {
case <-au.catchpointSlowWriting:
// yes, we're already doing fast-writing.
default:
// no, we're not yet doing fast writing, make it so.
close(au.catchpointSlowWriting)
}
}
return
}
offset = uint64(newBase - au.dbRound)
// check to see if this is a catchpoint round
isCatchpointRound = ((offset + uint64(lookback+au.dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+au.dbRound))) % au.catchpointInterval))
// calculate the number of pending deltas
pendingDeltas = au.deltasAccum[offset] - au.deltasAccum[0]
// If we recently flushed, wait to aggregate some more blocks.
// ( unless we're creating a catchpoint, in which case we want to flush it right away
// so that all the instances of the catchpoint would contain the exact same data )
flushTime := time.Now()
if !flushTime.After(au.lastFlushTime.Add(balancesFlushInterval)) && !isCatchpointRound && pendingDeltas < pendingDeltasFlushThreshold {
return au.dbRound
}
if isCatchpointRound && au.archivalLedger {
au.catchpointWriting = make(chan struct{}, 1)
au.catchpointSlowWriting = make(chan struct{}, 1)
if hasMultipleIntermediateCatchpoint {
close(au.catchpointSlowWriting)
}
}
dc = deferedCommit{
offset: offset,
dbRound: au.dbRound,
lookback: lookback,
}
au.accountsWriting.Add(1)
return
}
// newBlock is the accountUpdates implementation of the ledgerTracker interface. This is the "external" facing function
// which invokes the internal implementation after taking the lock.
func (au *accountUpdates) newBlock(blk bookkeeping.Block, delta StateDelta) {
au.accountsMu.Lock()
defer au.accountsMu.Unlock()
au.newBlockImpl(blk, delta)
}
// Totals returns the totals for a given round
func (au *accountUpdates) Totals(rnd basics.Round) (totals AccountTotals, err error) {
au.accountsMu.RLock()
defer au.accountsMu.RUnlock()
return au.totalsImpl(rnd)
}
// GetCatchpointStream returns an io.Reader to the catchpoint file associated with the provided round
func (au *accountUpdates) GetCatchpointStream(round basics.Round) (io.ReadCloser, error) {
dbFileName := ""
err := au.dbs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
dbFileName, _, _, err = getCatchpoint(tx, round)
return
})
if err != nil && err != sql.ErrNoRows {
// we had some sql error.
return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to lookup catchpoint %d: %v", round, err)
}
if dbFileName != "" {
catchpointPath := filepath.Join(au.dbDirectory, dbFileName)
file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
if err == nil && file != nil {
return file, nil
}
// else, see if this is a file-not-found error
if os.IsNotExist(err) {
// the database told us that we have this file.. but we couldn't find it.
// delete it from the database.
err := au.saveCatchpointFile(round, "", 0, "")
if err != nil {
au.log.Warnf("accountUpdates: getCatchpointStream: unable to delete missing catchpoint entry: %v", err)
return nil, err
}
return nil, ErrNoEntry{}
}
// it's some other error.
return nil, fmt.Errorf("accountUpdates: getCatchpointStream: unable to open catchpoint file '%s' %v", catchpointPath, err)
}
// if the database doesn't know about that round, see if we have that file anyway:
fileName := filepath.Join("catchpoints", catchpointRoundToPath(round))
catchpointPath := filepath.Join(au.dbDirectory, fileName)
file, err := os.OpenFile(catchpointPath, os.O_RDONLY, 0666)
if err == nil && file != nil {
// great, we found a file that should have been in the database; add it now:
fileInfo, err := file.Stat()
if err != nil {
// we couldn't get the stat, so just return with the file.
return file, nil
}
err = au.saveCatchpointFile(round, fileName, fileInfo.Size(), "")
if err != nil {
au.log.Warnf("accountUpdates: getCatchpointStream: unable to save missing catchpoint entry: %v", err)
}
return file, nil
}
return nil, ErrNoEntry{}
}
// functions below this line are all internal functions
// accountUpdatesLedgerEvaluator is a "ledger emulator" which is used *only* by initializeCaches, as a way to shortcut
// the locks taken by the real ledger object when making requests that are being served by the accountUpdates.
// Using this struct allows us to take the tracker lock *before* calling loadFromDisk, and have the operation complete
// without taking any locks. Note that it's not only lock performance that is gained: by having loadFromDisk
// not require any external locks, we can safely take a tracker lock on the ledger during reloadLedger, which ensures
// that even during a catchpoint catchup mode switch, we're still correctly protected by a mutex.
type accountUpdatesLedgerEvaluator struct {
// au is the associated accountUpdates structure, which invokes the trackerEvalVerified function, passing this structure as input.
// The accountUpdatesLedgerEvaluator would access the underlying accountUpdates functions directly, bypassing the balances mutex lock.
au *accountUpdates
// prevHeader is the previous header to the current one. It is used only in the context of initializeCaches, where we iteratively
// build the StateDelta, which requires a peek at the "previous" header information.
prevHeader bookkeeping.BlockHeader
}
// GenesisHash returns the genesis hash
func (aul *accountUpdatesLedgerEvaluator) GenesisHash() crypto.Digest {
return aul.au.ledger.GenesisHash()
}
// BlockHdr returns the header of the given round. When the evaluator is running, it only refers to the previous header, which is what we
// are providing here. Any attempt to access a different header would get denied.
func (aul *accountUpdatesLedgerEvaluator) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
if r == aul.prevHeader.Round {
return aul.prevHeader, nil
}
return bookkeeping.BlockHeader{}, ErrNoEntry{}
}
// Lookup returns the account balance for a given address at a given round
func (aul *accountUpdatesLedgerEvaluator) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountData, error) {
return aul.au.lookupImpl(rnd, addr, true)
}
// Totals returns the totals for a given round
func (aul *accountUpdatesLedgerEvaluator) Totals(rnd basics.Round) (AccountTotals, error) {
return aul.au.totalsImpl(rnd)
}
// isDup returns whether a transaction is a duplicate one. It's not needed by the accountUpdatesLedgerEvaluator and is implemented as a stub.
func (aul *accountUpdatesLedgerEvaluator) isDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, txlease) (bool, error) {
// this is a non-issue since this call will never be made on non-validating evaluation
return false, fmt.Errorf("accountUpdatesLedgerEvaluator: tried to check for dup during accountUpdates initialization")
}
// LookupWithoutRewards returns the account balance for a given address at a given round, without the reward
func (aul *accountUpdatesLedgerEvaluator) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, error) {
return aul.au.lookupImpl(rnd, addr, false)
}
// GetCreatorForRound returns the asset/app creator for a given asset/app index at a given round
func (aul *accountUpdatesLedgerEvaluator) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
return aul.au.getCreatorForRoundImpl(rnd, cidx, ctype)
}
// totalsImpl returns the totals for a given round
func (au *accountUpdates) totalsImpl(rnd basics.Round) (totals AccountTotals, err error) {
offset, err := au.roundOffset(rnd)
if err != nil {
return
}
totals = au.roundTotals[offset]
return
}
// initializeCaches fills up the accountUpdates cache with the most recent ~320 blocks
func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, writingCatchpointRound basics.Round) (catchpointBlockDigest crypto.Digest, err error) {
var blk bookkeeping.Block
var delta StateDelta
accLedgerEval := accountUpdatesLedgerEvaluator{
au: au,
}
if lastBalancesRound < lastestBlockRound {
accLedgerEval.prevHeader, err = au.ledger.BlockHdr(lastBalancesRound)
if err != nil {
return
}
}
for lastBalancesRound < lastestBlockRound {
next := lastBalancesRound + 1
blk, err = au.ledger.Block(next)
if err != nil {
return
}
delta, err = au.ledger.trackerEvalVerified(blk, &accLedgerEval)
if err != nil {
return
}
au.newBlockImpl(blk, delta)
lastBalancesRound = next
if next == basics.Round(writingCatchpointRound) {
catchpointBlockDigest = blk.Digest()
}
accLedgerEval.prevHeader = *delta.hdr
}
return
}
// initializeFromDisk performs the atomic operation of loading the accounts data from disk
// and preparing the accountUpdates for operation, including initializing the commitSyncer goroutine.
func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRound, lastestBlockRound basics.Round, err error) {
au.dbs = l.trackerDB()
au.log = l.trackerLog()
au.ledger = l
if au.initAccounts == nil {
err = fmt.Errorf("accountUpdates.initializeFromDisk: initAccounts not set")
return
}
lastestBlockRound = l.Latest()
err = au.dbs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
var err0 error
au.dbRound, err0 = au.accountsInitialize(ctx, tx)
if err0 != nil {
return err0
}
// Check whether the blocks DB and tracker DB are out of sync
if au.dbRound > lastestBlockRound {
au.log.Warnf("accountUpdates.initializeFromDisk: resetting accounts DB (on round %v, but blocks DB's latest is %v)", au.dbRound, lastestBlockRound)
err0 = accountsReset(tx)
if err0 != nil {
return err0
}
au.dbRound, err0 = au.accountsInitialize(ctx, tx)
if err0 != nil {
return err0
}
}
totals, err0 := accountsTotals(tx, false)
if err0 != nil {
return err0
}
au.roundTotals = []AccountTotals{totals}
return nil
})
if err != nil {
return
}
// vacuumDatabase would be a no-op if au.vacuumOnStartup is cleared.
err = au.vacuumDatabase(context.Background())
if err != nil {
return
}
au.accountsq, err = accountsDbInit(au.dbs.rdb.Handle, au.dbs.wdb.Handle)
if err != nil {
return
}
au.lastCatchpointLabel, _, err = au.accountsq.readCatchpointStateString(context.Background(), catchpointStateLastCatchpoint)
if err != nil {
return
}
hdr, err := l.BlockHdr(au.dbRound)
if err != nil {
return
}
au.protos = []config.ConsensusParams{config.Consensus[hdr.CurrentProtocol]}
au.deltas = nil
au.creatableDeltas = nil
au.accounts = make(map[basics.Address]modifiedAccount)
au.creatables = make(map[basics.CreatableIndex]modifiedCreatable)
au.deltasAccum = []int{0}
// keep these channels closed if we're not generating a catchpoint
au.catchpointWriting = make(chan struct{}, 1)
au.catchpointSlowWriting = make(chan struct{}, 1)
close(au.catchpointSlowWriting)
close(au.catchpointWriting)
au.ctx, au.ctxCancel = context.WithCancel(context.Background())
au.committedOffset = make(chan deferedCommit, 1)
au.commitSyncerClosed = make(chan struct{})
go au.commitSyncer(au.committedOffset)
lastBalancesRound = au.dbRound
return
}
// accountHashBuilder calculates the hash key used for the trie by combining the account address and the account data
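// For illustration, the resulting key is 4+crypto.DigestSize = 36 bytes long: for an account whose
// RewardsBase is 0x0102030405, the first four bytes hold the low 32 bits in big-endian order
// ( 02 03 04 05 ), and the remaining 32 bytes are crypto.Hash(addr || encodedAccountData).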
func accountHashBuilder(addr basics.Address, accountData basics.AccountData, encodedAccountData []byte) []byte {
hash := make([]byte, 4+crypto.DigestSize)
// write out the lowest 32 bits of the reward base. This should improve the caching of the trie by allowing
// recently updated accounts to be in-cache, while "older" nodes will be left alone.
for i, rewards := 3, accountData.RewardsBase; i >= 0; i, rewards = i-1, rewards>>8 {
// the following takes the rewards & 255 -> hash[i]
hash[i] = byte(rewards)
}
entryHash := crypto.Hash(append(addr[:], encodedAccountData[:]...))
copy(hash[4:], entryHash[:])
return hash[:]
}
// accountsInitialize initializes the accounts DB if needed and returns the current account round.
// As part of the initialization, it tests the current database schema version, and performs upgrade
// procedures to bring it up to the database schema supported by the binary.
func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (basics.Round, error) {
// check current database version.
dbVersion, err := db.GetUserVersion(ctx, tx)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to read database schema version : %v", err)
}
// if the database version is greater than the one supported by the current binary, write a warning. This would keep the existing
// fallback behaviour where we could use an older binary iff the schema happens to be backward compatible.
if dbVersion > accountDBVersion {
au.log.Warnf("accountsInitialize database schema version is %d, but algod supports only %d", dbVersion, accountDBVersion)
}
if dbVersion < accountDBVersion {
au.log.Infof("accountsInitialize upgrading database schema from version %d to version %d", dbVersion, accountDBVersion)
for dbVersion < accountDBVersion {
au.log.Infof("accountsInitialize performing upgrade from version %d", dbVersion)
// perform the initialization/upgrade
switch dbVersion {
case 0:
dbVersion, err = au.upgradeDatabaseSchema0(ctx, tx)
if err != nil {
au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 0 : %v", err)
return 0, err
}
case 1:
dbVersion, err = au.upgradeDatabaseSchema1(ctx, tx)
if err != nil {
au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 1 : %v", err)
return 0, err
}
default:
return 0, fmt.Errorf("accountsInitialize unable to upgrade database from schema version %d", dbVersion)
}
}
au.log.Infof("accountsInitialize database schema upgrade complete")
}
rnd, hashRound, err := accountsRound(tx)
if err != nil {
return 0, err
}
if hashRound != rnd {
// if the hashed round is different from the base round, something was modified, and the accounts aren't in sync
// with the hashes.
err = resetAccountHashes(tx)
if err != nil {
return 0, err
}
// if catchpoint is disabled on this node, we could complete the initialization right here.
if au.catchpointInterval == 0 {
return rnd, nil
}
}
// create the merkle trie for the balances
committer, err := makeMerkleCommitter(tx, false)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to makeMerkleCommitter: %v", err)
}
trie, err := merkletrie.MakeTrie(committer, trieCachedNodesCount)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err)
}
// we might have a database that was previously initialized, and now we're adding the balances trie. In that case, we need to add all the existing balances to this trie.
// we can figure this out by examining the hash of the root:
rootHash, err := trie.RootHash()
if err != nil {
return rnd, fmt.Errorf("accountsInitialize was unable to retrieve trie root hash: %v", err)
}
if rootHash.IsZero() {
accountIdx := 0
for {
bal, err := encodedAccountsRange(ctx, tx, accountIdx, trieRebuildAccountChunkSize)
if err != nil {
return rnd, err
}
if len(bal) == 0 {
break
}
for _, balance := range bal {
var accountData basics.AccountData
err = protocol.Decode(balance.AccountData, &accountData)
if err != nil {
return rnd, err
}
hash := accountHashBuilder(balance.Address, accountData, balance.AccountData)
added, err := trie.Add(hash)
if err != nil {
return rnd, fmt.Errorf("accountsInitialize was unable to add changes to trie: %v", err)
}
if !added {
au.log.Warnf("accountsInitialize attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(hash), balance.Address)
}
}
// this trie Evict will commit using the current transaction.
// if anything goes wrong, it will still get rolled back.
_, err = trie.Evict(true)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
}
if len(bal) < trieRebuildAccountChunkSize {
break
}
accountIdx += trieRebuildAccountChunkSize
}
// we've just updated the merkle trie, update the hashRound to reflect that.
err = updateAccountsRound(tx, rnd, rnd)
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to update the account round to %d: %v", rnd, err)
}
}
au.balancesTrie = trie
return rnd, nil
}
// upgradeDatabaseSchema0 upgrades the database schema from version 0 to version 1
//
// Schema of version 0 is expected to be aligned with the schema used on version 2.0.8 or before.
// Any database of version 2.0.8 would be of version 0. At this point, the database might
// have the following tables : ( i.e. a newly created database would not have these )
// * acctrounds
// * accounttotals
// * accountbase
// * assetcreators
// * storedcatchpoints
// * accounthashes
// * catchpointstate
//
// As the first step of the upgrade, the above tables are being created if they do not already exist.
// Following that, the assetcreators table is being altered by adding a new column to it (ctype).
// Last, in case the database was just created, it would get initialized with the following:
// The accountbase would get initialized with the au.initAccounts
// The accounttotals would get initialized to align with the initialization account added to accountbase
// The acctrounds would get updated to indicate that the balance matches round 0
//
func (au *accountUpdates) upgradeDatabaseSchema0(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, err error) {
au.log.Infof("accountsInitialize initializing schema")
err = accountsInit(tx, au.initAccounts, au.initProto)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to initialize schema : %v", err)
}
_, err = db.SetUserVersion(ctx, tx, 1)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 0 to 1: %v", err)
}
return 1, nil
}
// upgradeDatabaseSchema1 upgrades the database schema from version 1 to version 2
//
// The schema update to version 2 is intended to ensure that the encoding of all the accounts data is
// both canonical and identical across the entire network. On release 2.0.5 we released an upgrade to the messagepack.
// The upgraded messagepack was decoding the account data correctly, but would have a different
// encoding compared to its predecessor. As a result, some of the account data that was previously stored
// would have a different encoded representation than the one on disk.
// To address this, this startup procedure would attempt to scan all the accounts data. For each account data, we would
// see if its encoding aligns with the current messagepack encoder. If it doesn't, we would update its encoding.
// Then, depending on whether we found any such account data, we would reset the merkle trie and stored catchpoints.
// Once the upgrade is complete, accountsInitialize would (if needed) rebuild the merkle trie using the newly
// encoded accounts.
//
// This upgrade doesn't change any of the actual database schema ( i.e. tables, indexes ) but rather just performs
// a functional update to its content.
//
func (au *accountUpdates) upgradeDatabaseSchema1(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, err error) {
// update accounts encoding.
au.log.Infof("accountsInitialize verifying accounts data encoding")
modifiedAccounts, err := reencodeAccounts(ctx, tx)
if err != nil {
return 0, err
}
if modifiedAccounts > 0 {
au.log.Infof("accountsInitialize reencoded %d accounts", modifiedAccounts)
au.log.Infof("accountsInitialize resetting account hashes")
// reset the merkle trie
err = resetAccountHashes(tx)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to reset account hashes : %v", err)
}
au.log.Infof("accountsInitialize preparing queries")
// initialize a new accountsq with the incoming transaction.
accountsq, err := accountsDbInit(tx, tx)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to prepare queries : %v", err)
}
// close the prepared statements when we're done with them.
defer accountsq.close()
au.log.Infof("accountsInitialize resetting prior catchpoints")
// delete the last catchpoint label if we have any.
_, err = accountsq.writeCatchpointStateString(ctx, catchpointStateLastCatchpoint, "")
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to clear prior catchpoint : %v", err)
}
au.log.Infof("accountsInitialize deleting stored catchpoints")
// delete catchpoints.
err = au.deleteStoredCatchpoints(ctx, accountsq)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to delete stored catchpoints : %v", err)
}
} else {
au.log.Infof("accountsInitialize found that no accounts needed to be reencoded")
}
// update version
_, err = db.SetUserVersion(ctx, tx, 2)
if err != nil {
return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 1 to 2: %v", err)
}
return 2, nil
}
// deleteStoredCatchpoints iterates over the storedcatchpoints table and deletes all the files stored on disk.
// once all the files have been deleted, it would go ahead and remove the entries from the table.
func (au *accountUpdates) deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries) (err error) {
catchpointsFilesChunkSize := 50
for {
fileNames, err := dbQueries.getOldestCatchpointFiles(ctx, catchpointsFilesChunkSize, 0)
if err != nil {
return err
}
if len(fileNames) == 0 {
break
}
for round, fileName := range fileNames {
absCatchpointFileName := filepath.Join(au.dbDirectory, fileName)
err = os.Remove(absCatchpointFileName)
if err == nil || os.IsNotExist(err) {
// it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
err = nil
} else {
// we can't delete the file, abort -
return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
}
// clear the entry from the database
err = dbQueries.storeCatchpoint(ctx, round, "", "", 0)
if err != nil {
return err
}
}
}
return nil
}
// accountsUpdateBalances applies the given deltas array to the merkle trie
func (au *accountUpdates) accountsUpdateBalances(accountsDeltasRound []map[basics.Address]accountDelta, offset uint64) (err error) {
if au.catchpointInterval == 0 {
return nil
}
var added, deleted bool
accumulatedChanges := 0
for i := uint64(0); i < offset; i++ {
accountsDeltas := accountsDeltasRound[i]
for addr, delta := range accountsDeltas {
if !delta.old.IsZero() {
deleteHash := accountHashBuilder(addr, delta.old, protocol.Encode(&delta.old))
deleted, err = au.balancesTrie.Delete(deleteHash)
if err != nil {
return err
}
if !deleted {
au.log.Warnf("failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr)
} else {
accumulatedChanges++
}
}
if !delta.new.IsZero() {
addHash := accountHashBuilder(addr, delta.new, protocol.Encode(&delta.new))
added, err = au.balancesTrie.Add(addHash)
if err != nil {
return err
}
if !added {
au.log.Warnf("attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(addHash), addr)
} else {
accumulatedChanges++
}
}
}
if accumulatedChanges >= trieAccumulatedChangesFlush {
accumulatedChanges = 0
err = au.balancesTrie.Commit()
if err != nil {
return
}
}
}
// write it all to disk.
if accumulatedChanges > 0 {
err = au.balancesTrie.Commit()
}
return
}
// newBlockImpl is the accountUpdates implementation of the ledgerTracker interface. This is the "internal" facing function
// which assumes that no lock needs to be taken.
func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta StateDelta) {
proto := config.Consensus[blk.CurrentProtocol]
rnd := blk.Round()
if rnd <= au.latest() {
// Duplicate, ignore.
return
}
if rnd != au.latest()+1 {
au.log.Panicf("accountUpdates: newBlock %d too far in the future, dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
}
au.deltas = append(au.deltas, delta.accts)
au.protos = append(au.protos, proto)
au.creatableDeltas = append(au.creatableDeltas, delta.creatables)
au.roundDigest = append(au.roundDigest, blk.Digest())
au.deltasAccum = append(au.deltasAccum, len(delta.accts)+au.deltasAccum[len(au.deltasAccum)-1])
var ot basics.OverflowTracker
newTotals := au.roundTotals[len(au.roundTotals)-1]
allBefore := newTotals.All()
newTotals.applyRewards(delta.hdr.RewardsLevel, &ot)
for addr, data := range delta.accts {
newTotals.delAccount(proto, data.old, &ot)
newTotals.addAccount(proto, data.new, &ot)
macct := au.accounts[addr]
macct.ndeltas++
macct.data = data.new
au.accounts[addr] = macct
}
for cidx, cdelta := range delta.creatables {
mcreat := au.creatables[cidx]
mcreat.creator = cdelta.creator
mcreat.created = cdelta.created
mcreat.ctype = cdelta.ctype
mcreat.ndeltas++
au.creatables[cidx] = mcreat
}
if ot.Overflowed {
au.log.Panicf("accountUpdates: newBlock %d overflowed totals", rnd)
}
allAfter := newTotals.All()
if allBefore != allAfter {
au.log.Panicf("accountUpdates: sum of money changed from %d to %d", allBefore.Raw, allAfter.Raw)
}
au.roundTotals = append(au.roundTotals, newTotals)
}
// lookupImpl returns the account data for a given address at a given round. The withRewards flag indicates whether the
// rewards should be added to the AccountData before returning. Note that the function doesn't update the account with the rewards,
// even though it could return the AccountData which represents the "rewarded" account data.
func (au *accountUpdates) lookupImpl(rnd basics.Round, addr basics.Address, withRewards bool) (data basics.AccountData, err error) {
offset, err := au.roundOffset(rnd)
if err != nil {
return
}
offsetForRewards := offset
defer func() {
if withRewards {
totals := au.roundTotals[offsetForRewards]
proto := au.protos[offsetForRewards]
data = data.WithUpdatedRewards(proto, totals.RewardsLevel)
}
}()
// Check if this is the most recent round, in which case, we can
// use a cache of the most recent account state.
if offset == uint64(len(au.deltas)) {
macct, ok := au.accounts[addr]
if ok {
return macct.data, nil
}
} else {
// Check if the account has been updated recently. Traverse the deltas
// backwards to ensure that later updates take priority if present.
for offset > 0 {
offset--
d, ok := au.deltas[offset][addr]
if ok {
return d.new, nil
}
}
}
// No updates of this account in the in-memory deltas; use on-disk DB.
// The check in roundOffset() made sure the round is exactly the one
// present in the on-disk DB. As an optimization, we avoid creating
// a separate transaction here, and directly use a prepared SQL query
// against the database.
return au.accountsq.lookup(addr)
}
// getCreatorForRoundImpl returns the asset/app creator for a given asset/app index at a given round
func (au *accountUpdates) getCreatorForRoundImpl(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
offset, err := au.roundOffset(rnd)
if err != nil {
return basics.Address{}, false, err
}
// If this is the most recent round, au.creatables will have the latest
// state and we can skip scanning backwards over creatableDeltas
if offset == uint64(len(au.deltas)) {
// Check if we already have the asset/creator in cache
creatableDelta, ok := au.creatables[cidx]
if ok {
if creatableDelta.created && creatableDelta.ctype == ctype {
return creatableDelta.creator, true, nil
}
return basics.Address{}, false, nil
}
} else {
for offset > 0 {
offset--
creatableDelta, ok := au.creatableDeltas[offset][cidx]
if ok {
if creatableDelta.created && creatableDelta.ctype == ctype {
return creatableDelta.creator, true, nil
}
return basics.Address{}, false, nil
}
}
}
// Check the database
return au.accountsq.lookupCreator(cidx, ctype)
}
// accountsCreateCatchpointLabel creates a catchpoint label and writes it.
func (au *accountUpdates) accountsCreateCatchpointLabel(committedRound basics.Round, totals AccountTotals, ledgerBlockDigest crypto.Digest, trieBalancesHash crypto.Digest) (label string, err error) {
cpLabel := makeCatchpointLabel(committedRound, ledgerBlockDigest, trieBalancesHash, totals)
label = cpLabel.String()
_, err = au.accountsq.writeCatchpointStateString(context.Background(), catchpointStateLastCatchpoint, label)
return
}
// roundOffset calculates the offset of the given round compared to the current dbRound. Requires that the lock is held.
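// For example, with dbRound = 1000 and 5 in-memory deltas, the valid rounds are 1000..1005:
// roundOffset(1003) returns 3, while rounds below 1000 or above 1005 return an error.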
func (au *accountUpdates) roundOffset(rnd basics.Round) (offset uint64, err error) {
if rnd < au.dbRound {
err = fmt.Errorf("round %d before dbRound %d", rnd, au.dbRound)
return
}
off := uint64(rnd - au.dbRound)
if off > uint64(len(au.deltas)) {
err = fmt.Errorf("round %d too high: dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas))
return
}
return off, nil
}
// commitSyncer is the syncer go-routine function which performs the database updates. Internally, it dequeues deferedCommits and
// sends the tasks to commitRound for completing the operation.
func (au *accountUpdates) commitSyncer(deferedCommits chan deferedCommit) {
defer close(au.commitSyncerClosed)
for {
select {
case committedOffset, ok := <-deferedCommits:
if !ok {
return
}
au.commitRound(committedOffset.offset, committedOffset.dbRound, committedOffset.lookback)
case <-au.ctx.Done():
// drain the pending commits queue:
drained := false
for !drained {
select {
case <-deferedCommits:
au.accountsWriting.Done()
default:
drained = true
}
}
return
}
}
}
// commitRound writes a "chunk" of rounds to the database, and updates the dbRound accordingly.
func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookback basics.Round) {
defer au.accountsWriting.Done()
au.accountsMu.RLock()
// we can exit right away, as this is the result of a mis-ordered call to committedUpTo.
if au.dbRound < dbRound || offset < uint64(au.dbRound-dbRound) {
// if this is an archival ledger, we might need to close the catchpointWriting channel
if au.archivalLedger {
// determine if this was a catchpoint round
isCatchpointRound := ((offset + uint64(lookback+dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+dbRound))) % au.catchpointInterval))
if isCatchpointRound {
// it was a catchpoint round, so close the channel.
close(au.catchpointWriting)
}
}
au.accountsMu.RUnlock()
return
}
// adjust the offset according to what happened meanwhile.
offset -= uint64(au.dbRound - dbRound)
dbRound = au.dbRound
newBase := basics.Round(offset) + dbRound
flushTime := time.Now()
isCatchpointRound := ((offset + uint64(lookback+dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+dbRound))) % au.catchpointInterval))
// create a copy of the deltas, round totals and protos for the range we're going to flush.
deltas := make([]map[basics.Address]accountDelta, offset, offset)
creatableDeltas := make([]map[basics.CreatableIndex]modifiedCreatable, offset, offset)
roundTotals := make([]AccountTotals, offset+1, offset+1)
protos := make([]config.ConsensusParams, offset+1, offset+1)
copy(deltas, au.deltas[:offset])
copy(creatableDeltas, au.creatableDeltas[:offset])
copy(roundTotals, au.roundTotals[:offset+1])
copy(protos, au.protos[:offset+1])
// Keep track of how many changes to each account we flush to the
// account DB, so that we can drop the corresponding refcounts in
// au.accounts.
flushcount := make(map[basics.Address]int)
creatableFlushcount := make(map[basics.CreatableIndex]int)
var committedRoundDigest crypto.Digest
if isCatchpointRound {
committedRoundDigest = au.roundDigest[offset+uint64(lookback)-1]
}
au.accountsMu.RUnlock()
// in committedUpTo, we expect this function to close the catchpointWriting channel when
// it's on a catchpoint round and it's an archival ledger. Doing this in a deferred function
// here prevents us from "forgetting" to close that channel later on.
defer func() {
if isCatchpointRound && au.archivalLedger {
close(au.catchpointWriting)
}
}()
for i := uint64(0); i < offset; i++ {
for addr := range deltas[i] {
flushcount[addr] = flushcount[addr] + 1
}
for cidx := range creatableDeltas[i] {
creatableFlushcount[cidx] = creatableFlushcount[cidx] + 1
}
}
var catchpointLabel string
beforeUpdatingBalancesTime := time.Now()
var trieBalancesHash crypto.Digest
err := au.dbs.wdb.AtomicCommitWriteLock(func(ctx context.Context, tx *sql.Tx) (err error) {
treeTargetRound := basics.Round(0)
if au.catchpointInterval > 0 {
mc, err0 := makeMerkleCommitter(tx, false)
if err0 != nil {
return err0
}
if au.balancesTrie == nil {
trie, err := merkletrie.MakeTrie(mc, trieCachedNodesCount)
if err != nil {
au.log.Warnf("unable to create merkle trie during committedUpTo: %v", err)
return err
}
au.balancesTrie = trie
} else {
au.balancesTrie.SetCommitter(mc)
}
treeTargetRound = dbRound + basics.Round(offset)
}
for i := uint64(0); i < offset; i++ {
err = accountsNewRound(tx, deltas[i], creatableDeltas[i])
if err != nil {
return err
}
}
err = totalsNewRounds(tx, deltas[:offset], roundTotals[1:offset+1], protos[1:offset+1])
if err != nil {
return err
}
err = au.accountsUpdateBalances(deltas, offset)
if err != nil {
return err
}
err = updateAccountsRound(tx, dbRound+basics.Round(offset), treeTargetRound)
if err != nil {
return err
}
if isCatchpointRound {
trieBalancesHash, err = au.balancesTrie.RootHash()
if err != nil {
return
}
}
return nil
}, &au.accountsMu)
if err != nil {
au.balancesTrie = nil
au.log.Warnf("unable to advance account snapshot: %v", err)
return
}
if isCatchpointRound {
catchpointLabel, err = au.accountsCreateCatchpointLabel(dbRound+basics.Round(offset)+lookback, roundTotals[offset], committedRoundDigest, trieBalancesHash)
if err != nil {
au.log.Warnf("commitRound : unable to create a catchpoint label: %v", err)
}
}
if au.balancesTrie != nil {
_, err = au.balancesTrie.Evict(false)
if err != nil {
au.log.Warnf("merkle trie failed to evict: %v", err)
}
}
if isCatchpointRound && catchpointLabel != "" {
au.lastCatchpointLabel = catchpointLabel
}
updatingBalancesDuration := time.Now().Sub(beforeUpdatingBalancesTime)
// Drop reference counts to modified accounts, and evict them
// from in-memory cache when no references remain.
for addr, cnt := range flushcount {
macct, ok := au.accounts[addr]
if !ok {
au.log.Panicf("inconsistency: flushed %d changes to %s, but not in au.accounts", cnt, addr)
}
if cnt > macct.ndeltas {
au.log.Panicf("inconsistency: flushed %d changes to %s, but au.accounts had %d", cnt, addr, macct.ndeltas)
}
macct.ndeltas -= cnt
if macct.ndeltas == 0 {
delete(au.accounts, addr)
} else {
au.accounts[addr] = macct
}
}
for cidx, cnt := range creatableFlushcount {
mcreat, ok := au.creatables[cidx]
if !ok {
au.log.Panicf("inconsistency: flushed %d changes to creatable %d, but not in au.creatables", cnt, cidx)
}
if cnt > mcreat.ndeltas {
au.log.Panicf("inconsistency: flushed %d changes to creatable %d, but au.creatables had %d", cnt, cidx, mcreat.ndeltas)
}
mcreat.ndeltas -= cnt
if mcreat.ndeltas == 0 {
delete(au.creatables, cidx)
} else {
au.creatables[cidx] = mcreat
}
}
au.deltas = au.deltas[offset:]
au.deltasAccum = au.deltasAccum[offset:]
au.roundDigest = au.roundDigest[offset:]
au.protos = au.protos[offset:]
au.roundTotals = au.roundTotals[offset:]
au.creatableDeltas = au.creatableDeltas[offset:]
au.dbRound = newBase
au.lastFlushTime = flushTime
au.accountsMu.Unlock()
if isCatchpointRound && au.archivalLedger && catchpointLabel != "" {
// generate the catchpoint file. This needs to be done inline so that it will block any new accounts from being written.
// generateCatchpoint expects that the accounts data would not be modified in the background during its execution.
au.generateCatchpoint(basics.Round(offset)+dbRound+lookback, catchpointLabel, committedRoundDigest, updatingBalancesDuration)
}
}
// latest returns the latest round
func (au *accountUpdates) latest() basics.Round {
return au.dbRound + basics.Round(len(au.deltas))
}
// generateCatchpoint generates a single catchpoint file
func (au *accountUpdates) generateCatchpoint(committedRound basics.Round, label string, committedRoundDigest crypto.Digest, updatingBalancesDuration time.Duration) {
beforeGeneratingCatchpointTime := time.Now()
catchpointGenerationStats := telemetryspec.CatchpointGenerationEventDetails{
BalancesWriteTime: uint64(updatingBalancesDuration.Nanoseconds()),
}
// retryCatchpointCreation is used to allow the catchpoint file generation to be repeated on the next startup,
// in case the node crashed / was aborted before the catchpoint file generation could be completed.
retryCatchpointCreation := false
au.log.Debugf("accountUpdates: generateCatchpoint: generating catchpoint for round %d", committedRound)
defer func() {
if !retryCatchpointCreation {
// clear the writingCatchpoint flag
_, err := au.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(0))
if err != nil {
au.log.Warnf("accountUpdates: generateCatchpoint unable to clear catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
}
}
}()
_, err := au.accountsq.writeCatchpointStateUint64(context.Background(), catchpointStateWritingCatchpoint, uint64(committedRound))
if err != nil {
au.log.Warnf("accountUpdates: generateCatchpoint unable to write catchpoint state '%s' for round %d: %v", catchpointStateWritingCatchpoint, committedRound, err)
return
}
relCatchpointFileName := filepath.Join("catchpoints", catchpointRoundToPath(committedRound))
absCatchpointFileName := filepath.Join(au.dbDirectory, relCatchpointFileName)
catchpointWriter := makeCatchpointWriter(absCatchpointFileName, au.dbs.rdb, committedRound, committedRoundDigest, label)
more := true
const shortChunkExecutionDuration = 50 * time.Millisecond
const longChunkExecutionDuration = 1 * time.Second
var chunkExecutionDuration time.Duration
select {
case <-au.catchpointSlowWriting:
chunkExecutionDuration = longChunkExecutionDuration
default:
chunkExecutionDuration = shortChunkExecutionDuration
}
for more {
stepCtx, stepCancelFunction := context.WithTimeout(au.ctx, chunkExecutionDuration)
writeStepStartTime := time.Now()
more, err = catchpointWriter.WriteStep(stepCtx)
// accumulate the actual time we've spent writing in this step.
catchpointGenerationStats.CPUTime += uint64(time.Now().Sub(writeStepStartTime).Nanoseconds())
stepCancelFunction()
if more && err == nil {
// we just wrote some data, but there is more to be written.
// go to sleep for a while.
select {
case <-time.After(100 * time.Millisecond):
case <-au.ctx.Done():
retryCatchpointCreation = true
err2 := catchpointWriter.Abort()
if err2 != nil {
au.log.Warnf("accountUpdates: generateCatchpoint: error removing catchpoint file : %v", err2)
}
return
case <-au.catchpointSlowWriting:
chunkExecutionDuration = longChunkExecutionDuration
}
}
if err != nil {
au.log.Warnf("accountUpdates: generateCatchpoint: unable to create catchpoint : %v", err)
err2 := catchpointWriter.Abort()
if err2 != nil {
au.log.Warnf("accountUpdates: generateCatchpoint: error removing catchpoint file : %v", err2)
}
return
}
}
err = au.saveCatchpointFile(committedRound, relCatchpointFileName, catchpointWriter.GetSize(), catchpointWriter.GetCatchpoint())
if err != nil {
au.log.Warnf("accountUpdates: generateCatchpoint: unable to save catchpoint: %v", err)
return
}
catchpointGenerationStats.FileSize = uint64(catchpointWriter.GetSize())
catchpointGenerationStats.WritingDuration = uint64(time.Now().Sub(beforeGeneratingCatchpointTime).Nanoseconds())
catchpointGenerationStats.AccountsCount = catchpointWriter.GetTotalAccounts()
catchpointGenerationStats.CatchpointLabel = catchpointWriter.GetCatchpoint()
au.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.CatchpointGenerationEvent, catchpointGenerationStats)
au.log.With("writingDuration", catchpointGenerationStats.WritingDuration).
With("CPUTime", catchpointGenerationStats.CPUTime).
With("balancesWriteTime", catchpointGenerationStats.BalancesWriteTime).
With("accountsCount", catchpointGenerationStats.AccountsCount).
With("fileSize", catchpointGenerationStats.FileSize).
With("catchpointLabel", catchpointGenerationStats.CatchpointLabel).
Infof("Catchpoint file was generated")
}
// catchpointRoundToPath calculates the catchpoint file path for a given round
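// For example ( on a unix-like filesystem ), catchpointRoundToPath(512) yields "02/512.catchpoint"
// ( 512/256 = 2 ) and catchpointRoundToPath(10000) yields "27/10000.catchpoint" ( 10000/256 = 39 = 0x27 ),
// while rounds below 256 map directly to "<round>.catchpoint" with no parent directory.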
func catchpointRoundToPath(rnd basics.Round) string {
irnd := int64(rnd) / 256
outStr := ""
for irnd > 0 {
outStr = filepath.Join(outStr, fmt.Sprintf("%02x", irnd%256))
irnd = irnd / 256
}
outStr = filepath.Join(outStr, strconv.FormatInt(int64(rnd), 10)+".catchpoint")
return outStr
}
// saveCatchpointFile stores the provided fileName as the stored catchpoint for the given round.
// After a successful insert operation to the database, it would delete up to 2 old entries, as needed.
// Deleting 2 entries while inserting a single entry allows us to adjust the size of the backing storage and have the
// database and storage realign.
func (au *accountUpdates) saveCatchpointFile(round basics.Round, fileName string, fileSize int64, catchpoint string) (err error) {
if au.catchpointFileHistoryLength != 0 {
err = au.accountsq.storeCatchpoint(context.Background(), round, fileName, catchpoint, fileSize)
if err != nil {
au.log.Warnf("accountUpdates: saveCatchpoint: unable to save catchpoint: %v", err)
return
}
} else {
err = os.Remove(fileName)
if err != nil {
au.log.Warnf("accountUpdates: saveCatchpoint: unable to remove file (%s): %v", fileName, err)
return
}
}
if au.catchpointFileHistoryLength == -1 {
return
}
var filesToDelete map[basics.Round]string
filesToDelete, err = au.accountsq.getOldestCatchpointFiles(context.Background(), 2, au.catchpointFileHistoryLength)
if err != nil {
return fmt.Errorf("unable to delete catchpoint file, getOldestCatchpointFiles failed : %v", err)
}
for round, fileToDelete := range filesToDelete {
absCatchpointFileName := filepath.Join(au.dbDirectory, fileToDelete)
err = os.Remove(absCatchpointFileName)
if err == nil || os.IsNotExist(err) {
// it's ok if the file doesn't exist. just remove it from the database and we'll be good to go.
err = nil
} else {
// we can't delete the file, abort -
return fmt.Errorf("unable to delete old catchpoint file '%s' : %v", absCatchpointFileName, err)
}
err = au.accountsq.storeCatchpoint(context.Background(), round, "", "", 0)
if err != nil {
return fmt.Errorf("unable to delete old catchpoint entry '%s' : %v", fileToDelete, err)
}
}
return
}
// the vacuumDatabase performs a full vacuum of the accounts database.
func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
if !au.vacuumOnStartup {
return
}
startTime := time.Now()
vacuumExitCh := make(chan struct{}, 1)
vacuumLoggingAbort := sync.WaitGroup{}
vacuumLoggingAbort.Add(1)
// vacuuming the database can take a while. A long while. We want to have a logging function running in a separate go-routine that would log the progress to the log file.
// Also, when we're done vacuuming, we send an event notifying of the total time it took to vacuum the database.
go func() {
defer vacuumLoggingAbort.Done()
au.log.Infof("Vacuuming accounts database started")
for {
select {
case <-time.After(5 * time.Second):
au.log.Infof("Vacuuming accounts database in progress")
case <-vacuumExitCh:
return
}
}
}()
vacuumStats, err := au.dbs.wdb.Vacuum(ctx)
close(vacuumExitCh)
vacuumLoggingAbort.Wait()
if err != nil {
au.log.Warnf("Vacuuming account database failed : %v", err)
return err
}
vacuumElapsedTime := time.Now().Sub(startTime)
au.log.Infof("Vacuuming accounts database completed within %v, reducing number of pages from %d to %d and size from %d to %d", vacuumElapsedTime, vacuumStats.PagesBefore, vacuumStats.PagesAfter, vacuumStats.SizeBefore, vacuumStats.SizeAfter)
vacuumTelemetryStats := telemetryspec.BalancesAccountVacuumEventDetails{
VacuumTimeNanoseconds: vacuumElapsedTime.Nanoseconds(),
BeforeVacuumPageCount: vacuumStats.PagesBefore,
AfterVacuumPageCount: vacuumStats.PagesAfter,
BeforeVacuumSpaceBytes: vacuumStats.SizeBefore,
AfterVacuumSpaceBytes: vacuumStats.SizeAfter,
}
au.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.BalancesAccountVacuumEvent, vacuumTelemetryStats)
return
}
| 1 | 39,817 | What is the reason for increasing this? | algorand-go-algorand | go |
@@ -153,7 +153,9 @@ module.exports = class Transloadit extends Plugin {
// Add Assembly-specific Tus endpoint.
const tus = {
...file.tus,
- endpoint: status.tus_url
+ endpoint: status.tus_url,
+ // Include X-Request-ID headers for better debugging.
+ addRequestId: true
}
// Set Companion location. We only add this, if 'file' has the attribute | 1 | const Translator = require('@uppy/utils/lib/Translator')
const hasProperty = require('@uppy/utils/lib/hasProperty')
const { Plugin } = require('@uppy/core')
const Tus = require('@uppy/tus')
const Assembly = require('./Assembly')
const Client = require('./Client')
const AssemblyOptions = require('./AssemblyOptions')
const AssemblyWatcher = require('./AssemblyWatcher')
function defaultGetAssemblyOptions (file, options) {
return {
params: options.params,
signature: options.signature,
fields: options.fields
}
}
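// For illustration only (not part of this module): a user-supplied `getAssemblyOptions`
// can compute these values per file instead. The key, template id and `caption` field
// below are placeholders, not real values:
//
//   getAssemblyOptions (file) {
//     return {
//       params: { auth: { key: 'YOUR_TRANSLOADIT_KEY' }, template_id: 'YOUR_TEMPLATE_ID' },
//       fields: { caption: file.meta.caption }
//     }
//   }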
const COMPANION = 'https://api2.transloadit.com/companion'
// Regex matching acceptable postMessage() origins for authentication feedback from companion.
const ALLOWED_COMPANION_PATTERN = /\.transloadit\.com$/
// Regex used to check if a Companion address is run by Transloadit.
const TL_COMPANION = /https?:\/\/api2(?:-\w+)?\.transloadit\.com\/companion/
const TL_UPPY_SERVER = /https?:\/\/api2(?:-\w+)?\.transloadit\.com\/uppy-server/
/**
* Upload files to Transloadit using Tus.
*/
module.exports = class Transloadit extends Plugin {
static VERSION = require('../package.json').version
constructor (uppy, opts) {
super(uppy, opts)
this.type = 'uploader'
this.id = this.opts.id || 'Transloadit'
this.title = 'Transloadit'
this.defaultLocale = {
strings: {
creatingAssembly: 'Preparing upload...',
creatingAssemblyFailed: 'Transloadit: Could not create Assembly',
encoding: 'Encoding...'
}
}
const defaultOptions = {
service: 'https://api2.transloadit.com',
errorReporting: true,
waitForEncoding: false,
waitForMetadata: false,
alwaysRunAssembly: false,
importFromUploadURLs: false,
signature: null,
params: null,
fields: {},
getAssemblyOptions: defaultGetAssemblyOptions,
limit: 0
}
this.opts = { ...defaultOptions, ...opts }
this.i18nInit()
this._prepareUpload = this._prepareUpload.bind(this)
this._afterUpload = this._afterUpload.bind(this)
this._onError = this._onError.bind(this)
this._onTusError = this._onTusError.bind(this)
this._onCancelAll = this._onCancelAll.bind(this)
this._onFileUploadURLAvailable = this._onFileUploadURLAvailable.bind(this)
this._onRestored = this._onRestored.bind(this)
this._getPersistentData = this._getPersistentData.bind(this)
const hasCustomAssemblyOptions = this.opts.getAssemblyOptions !== defaultOptions.getAssemblyOptions
if (this.opts.params) {
AssemblyOptions.validateParams(this.opts.params)
} else if (!hasCustomAssemblyOptions) {
// Throw the same error that we'd throw if the `params` returned from a
// `getAssemblyOptions()` function is null.
AssemblyOptions.validateParams(null)
}
this.client = new Client({
service: this.opts.service,
client: this._getClientVersion(),
errorReporting: this.opts.errorReporting
})
// Contains Assembly instances for in-progress Assemblies.
this.activeAssemblies = {}
// Contains a mapping of uploadID to AssemblyWatcher
this.assemblyWatchers = {}
    // Contains file IDs that have completed postprocessing before the upload they belong to has entered the postprocess stage.
this.completedFiles = Object.create(null)
}
setOptions (newOpts) {
super.setOptions(newOpts)
this.i18nInit()
}
i18nInit () {
this.translator = new Translator([this.defaultLocale, this.uppy.locale, this.opts.locale])
this.i18n = this.translator.translate.bind(this.translator)
this.i18nArray = this.translator.translateArray.bind(this.translator)
this.setPluginState() // so that UI re-renders and we see the updated locale
}
_getClientVersion () {
const list = [
`uppy-core:${this.uppy.constructor.VERSION}`,
`uppy-transloadit:${this.constructor.VERSION}`,
`uppy-tus:${Tus.VERSION}`
]
const addPluginVersion = (pluginName, versionName) => {
const plugin = this.uppy.getPlugin(pluginName)
if (plugin) {
list.push(`${versionName}:${plugin.constructor.VERSION}`)
}
}
if (this.opts.importFromUploadURLs) {
addPluginVersion('XHRUpload', 'uppy-xhr-upload')
addPluginVersion('AwsS3', 'uppy-aws-s3')
addPluginVersion('AwsS3Multipart', 'uppy-aws-s3-multipart')
}
addPluginVersion('Dropbox', 'uppy-dropbox')
addPluginVersion('Facebook', 'uppy-facebook')
addPluginVersion('GoogleDrive', 'uppy-google-drive')
addPluginVersion('Instagram', 'uppy-instagram')
addPluginVersion('OneDrive', 'uppy-onedrive')
addPluginVersion('Url', 'uppy-url')
return list.join(',')
}
/**
* Attach metadata to files to configure the Tus plugin to upload to Transloadit.
* Also use Transloadit's Companion
*
* See: https://github.com/tus/tusd/wiki/Uploading-to-Transloadit-using-tus#uploading-using-tus
*
* @param {object} file
* @param {object} status
*/
_attachAssemblyMetadata (file, status) {
// Add the metadata parameters Transloadit needs.
const meta = {
...file.meta,
assembly_url: status.assembly_url,
filename: file.name,
fieldname: 'file'
}
// Add Assembly-specific Tus endpoint.
const tus = {
...file.tus,
endpoint: status.tus_url
}
// Set Companion location. We only add this, if 'file' has the attribute
    // remote, because this is the criterion to identify remote files.
// We only replace the hostname for Transloadit's companions, so that
// people can also self-host them while still using Transloadit for encoding.
let remote = file.remote
if (file.remote && TL_UPPY_SERVER.test(file.remote.companionUrl)) {
const err = new Error(
'The https://api2.transloadit.com/uppy-server endpoint was renamed to ' +
'https://api2.transloadit.com/companion, please update your `companionUrl` ' +
'options accordingly.')
// Explicitly log this error here because it is caught by the `createAssembly`
// Promise further along.
// That's fine, but createAssembly only shows the informer, we need something a
// little more noisy.
this.uppy.log(err)
throw err
}
if (file.remote && TL_COMPANION.test(file.remote.companionUrl)) {
const newHost = status.companion_url
.replace(/\/$/, '')
const path = file.remote.url
.replace(file.remote.companionUrl, '')
.replace(/^\//, '')
remote = {
...file.remote,
companionUrl: newHost,
url: `${newHost}/${path}`
}
}
// Store the Assembly ID this file is in on the file under the `transloadit` key.
const newFile = {
...file,
transloadit: {
assembly: status.assembly_id
}
}
// Only configure the Tus plugin if we are uploading straight to Transloadit (the default).
if (!this.opts.importFromUploadURLs) {
Object.assign(newFile, { meta, tus, remote })
}
return newFile
}
_createAssembly (fileIDs, uploadID, options) {
this.uppy.log('[Transloadit] Create Assembly')
return this.client.createAssembly({
params: options.params,
fields: options.fields,
expectedFiles: fileIDs.length,
signature: options.signature
}).then((newAssembly) => {
const assembly = new Assembly(newAssembly)
const status = assembly.status
const assemblyID = status.assembly_id
const { assemblies, uploadsAssemblies } = this.getPluginState()
this.setPluginState({
// Store the Assembly status.
assemblies: {
...assemblies,
[assemblyID]: status
},
// Store the list of Assemblies related to this upload.
uploadsAssemblies: {
...uploadsAssemblies,
[uploadID]: [
...uploadsAssemblies[uploadID],
assemblyID
]
}
})
const { files } = this.uppy.getState()
const updatedFiles = {}
fileIDs.forEach((id) => {
updatedFiles[id] = this._attachAssemblyMetadata(this.uppy.getFile(id), status)
})
this.uppy.setState({
files: {
...files,
...updatedFiles
}
})
this.uppy.emit('transloadit:assembly-created', status, fileIDs)
this.uppy.log(`[Transloadit] Created Assembly ${assemblyID}`)
return assembly
}).catch((err) => {
err.message = `${this.i18n('creatingAssemblyFailed')}: ${err.message}`
// Reject the promise.
throw err
})
}
_createAssemblyWatcher (assemblyID, fileIDs, uploadID) {
// AssemblyWatcher tracks completion states of all Assemblies in this upload.
const watcher = new AssemblyWatcher(this.uppy, assemblyID)
watcher.on('assembly-complete', (id) => {
const files = this.getAssemblyFiles(id)
files.forEach((file) => {
this.completedFiles[file.id] = true
this.uppy.emit('postprocess-complete', file)
})
})
watcher.on('assembly-error', (id, error) => {
// Clear postprocessing state for all our files.
const files = this.getAssemblyFiles(id)
files.forEach((file) => {
// TODO Maybe make a postprocess-error event here?
this.uppy.emit('upload-error', file, error)
this.uppy.emit('postprocess-complete', file)
})
})
this.assemblyWatchers[uploadID] = watcher
}
_shouldWaitAfterUpload () {
return this.opts.waitForEncoding || this.opts.waitForMetadata
}
/**
* Used when `importFromUploadURLs` is enabled: reserves all files in
* the Assembly.
*/
_reserveFiles (assembly, fileIDs) {
return Promise.all(fileIDs.map((fileID) => {
const file = this.uppy.getFile(fileID)
return this.client.reserveFile(assembly, file)
}))
}
/**
* Used when `importFromUploadURLs` is enabled: adds files to the Assembly
* once they have been fully uploaded.
*/
_onFileUploadURLAvailable (file) {
if (!file || !file.transloadit || !file.transloadit.assembly) {
return
}
const { assemblies } = this.getPluginState()
const assembly = assemblies[file.transloadit.assembly]
this.client.addFile(assembly, file).catch((err) => {
this.uppy.log(err)
this.uppy.emit('transloadit:import-error', assembly, file.id, err)
})
}
_findFile (uploadedFile) {
const files = this.uppy.getFiles()
for (let i = 0; i < files.length; i++) {
const file = files[i]
// Completed file upload.
if (file.uploadURL === uploadedFile.tus_upload_url) {
return file
}
// In-progress file upload.
if (file.tus && file.tus.uploadUrl === uploadedFile.tus_upload_url) {
return file
}
if (!uploadedFile.is_tus_file) {
// Fingers-crossed check for non-tus uploads, eg imported from S3.
if (file.name === uploadedFile.name && file.size === uploadedFile.size) {
return file
}
}
}
}
_onFileUploadComplete (assemblyId, uploadedFile) {
const state = this.getPluginState()
const file = this._findFile(uploadedFile)
if (!file) {
      this.uppy.log('[Transloadit] Couldn’t find the file, it was likely removed in the process')
return
}
this.setPluginState({
files: {
...state.files,
[uploadedFile.id]: {
assembly: assemblyId,
id: file.id,
uploadedFile
}
}
})
this.uppy.emit('transloadit:upload', uploadedFile, this.getAssembly(assemblyId))
}
/**
* Callback when a new Assembly result comes in.
*
* @param {string} assemblyId
* @param {string} stepName
* @param {object} result
*/
_onResult (assemblyId, stepName, result) {
const state = this.getPluginState()
const file = state.files[result.original_id]
// The `file` may not exist if an import robot was used instead of a file upload.
result.localId = file ? file.id : null
const entry = {
result,
stepName,
id: result.id,
assembly: assemblyId
}
this.setPluginState({
results: [...state.results, entry]
})
this.uppy.emit('transloadit:result', stepName, result, this.getAssembly(assemblyId))
}
/**
* When an Assembly has finished processing, get the final state
* and emit it.
*
* @param {object} status
*/
_onAssemblyFinished (status) {
const url = status.assembly_ssl_url
this.client.getAssemblyStatus(url).then((finalStatus) => {
const assemblyId = finalStatus.assemblyId
const state = this.getPluginState()
this.setPluginState({
assemblies: {
...state.assemblies,
[assemblyId]: finalStatus
}
})
this.uppy.emit('transloadit:complete', finalStatus)
})
}
_cancelAssembly (assembly) {
return this.client.cancelAssembly(assembly).then(() => {
// TODO bubble this through AssemblyWatcher so its event handlers can clean up correctly
this.uppy.emit('transloadit:assembly-cancelled', assembly)
})
}
/**
* When all files are removed, cancel in-progress Assemblies.
*/
_onCancelAll () {
const { assemblies } = this.getPluginState()
const cancelPromises = Object.keys(assemblies).map((assemblyID) => {
const assembly = this.getAssembly(assemblyID)
return this._cancelAssembly(assembly)
})
Promise.all(cancelPromises).catch((err) => {
this.uppy.log(err)
})
}
/**
* Custom state serialization for the Golden Retriever plugin.
* It will pass this back to the `_onRestored` function.
*
* @param {Function} setData
*/
_getPersistentData (setData) {
const state = this.getPluginState()
const assemblies = state.assemblies
const uploadsAssemblies = state.uploadsAssemblies
setData({
[this.id]: {
assemblies,
uploadsAssemblies
}
})
}
_onRestored (pluginData) {
const savedState = pluginData && pluginData[this.id] ? pluginData[this.id] : {}
const previousAssemblies = savedState.assemblies || {}
const uploadsAssemblies = savedState.uploadsAssemblies || {}
if (Object.keys(uploadsAssemblies).length === 0) {
// Nothing to restore.
return
}
// Convert loaded Assembly statuses to a Transloadit plugin state object.
const restoreState = (assemblies) => {
const files = {}
const results = []
Object.keys(assemblies).forEach((id) => {
const status = assemblies[id]
status.uploads.forEach((uploadedFile) => {
const file = this._findFile(uploadedFile)
files[uploadedFile.id] = {
id: file.id,
assembly: id,
uploadedFile
}
})
const state = this.getPluginState()
Object.keys(status.results).forEach((stepName) => {
status.results[stepName].forEach((result) => {
const file = state.files[result.original_id]
result.localId = file ? file.id : null
results.push({
id: result.id,
result,
stepName,
assembly: id
})
})
})
})
this.setPluginState({
assemblies,
files,
results,
uploadsAssemblies
})
}
// Set up the Assembly instances and AssemblyWatchers for existing Assemblies.
const restoreAssemblies = () => {
const { assemblies, uploadsAssemblies } = this.getPluginState()
// Set up the assembly watchers again for all the ongoing uploads.
Object.keys(uploadsAssemblies).forEach((uploadID) => {
const assemblyIDs = uploadsAssemblies[uploadID]
const fileIDsInUpload = assemblyIDs.reduce((acc, assemblyID) => {
const fileIDsInAssembly = this.getAssemblyFiles(assemblyID).map((file) => file.id)
acc.push(...fileIDsInAssembly)
return acc
}, [])
this._createAssemblyWatcher(assemblyIDs, fileIDsInUpload, uploadID)
})
const allAssemblyIDs = Object.keys(assemblies)
allAssemblyIDs.forEach((id) => {
const assembly = new Assembly(assemblies[id])
this._connectAssembly(assembly)
})
}
// Force-update all Assemblies to check for missed events.
const updateAssemblies = () => {
const { assemblies } = this.getPluginState()
return Promise.all(
Object.keys(assemblies).map((id) => {
return this.activeAssemblies[id].update()
})
)
}
// Restore all Assembly state.
this.restored = Promise.resolve().then(() => {
restoreState(previousAssemblies)
restoreAssemblies()
return updateAssemblies()
})
this.restored.then(() => {
this.restored = null
})
}
_connectAssembly (assembly) {
const { status } = assembly
const id = status.assembly_id
this.activeAssemblies[id] = assembly
// Sync local `assemblies` state
assembly.on('status', (newStatus) => {
const { assemblies } = this.getPluginState()
this.setPluginState({
assemblies: {
...assemblies,
[id]: newStatus
}
})
})
assembly.on('upload', (file) => {
this._onFileUploadComplete(id, file)
})
assembly.on('error', (error) => {
error.assembly = assembly.status
this.uppy.emit('transloadit:assembly-error', assembly.status, error)
})
assembly.on('executing', () => {
this.uppy.emit('transloadit:assembly-executing', assembly.status)
})
if (this.opts.waitForEncoding) {
assembly.on('result', (stepName, result) => {
this._onResult(id, stepName, result)
})
}
if (this.opts.waitForEncoding) {
assembly.on('finished', () => {
this._onAssemblyFinished(assembly.status)
})
} else if (this.opts.waitForMetadata) {
assembly.on('metadata', () => {
this._onAssemblyFinished(assembly.status)
})
}
// No need to connect to the socket if the Assembly has completed by now.
if (assembly.ok === 'ASSEMBLY_COMPLETE') {
return assembly
}
// TODO Do we still need this for anything…?
// eslint-disable-next-line no-unused-vars
const connected = new Promise((resolve, reject) => {
assembly.once('connect', resolve)
assembly.once('status', resolve)
assembly.once('error', reject)
}).then(() => {
this.uppy.log('[Transloadit] Socket is ready')
})
assembly.connect()
return assembly
}
_prepareUpload (fileIDs, uploadID) {
// Only use files without errors
fileIDs = fileIDs.filter((file) => !file.error)
fileIDs.forEach((fileID) => {
const file = this.uppy.getFile(fileID)
this.uppy.emit('preprocess-progress', file, {
mode: 'indeterminate',
message: this.i18n('creatingAssembly')
})
})
const createAssembly = ({ fileIDs, options }) => {
let createdAssembly
return this._createAssembly(fileIDs, uploadID, options).then((assembly) => {
createdAssembly = assembly
if (this.opts.importFromUploadURLs) {
return this._reserveFiles(assembly, fileIDs)
}
}).then(() => {
fileIDs.forEach((fileID) => {
const file = this.uppy.getFile(fileID)
this.uppy.emit('preprocess-complete', file)
})
return createdAssembly
}).catch((err) => {
fileIDs.forEach((fileID) => {
const file = this.uppy.getFile(fileID)
// Clear preprocessing state when the Assembly could not be created,
// otherwise the UI gets confused about the lingering progress keys
this.uppy.emit('preprocess-complete', file)
this.uppy.emit('upload-error', file, err)
})
throw err
})
}
const { uploadsAssemblies } = this.getPluginState()
this.setPluginState({
uploadsAssemblies: {
...uploadsAssemblies,
[uploadID]: []
}
})
const files = fileIDs.map((id) => this.uppy.getFile(id))
const assemblyOptions = new AssemblyOptions(files, this.opts)
return assemblyOptions.build().then(
(assemblies) => Promise.all(
assemblies.map(createAssembly)
).then((createdAssemblies) => {
const assemblyIDs = createdAssemblies.map(assembly => assembly.status.assembly_id)
this._createAssemblyWatcher(assemblyIDs, fileIDs, uploadID)
createdAssemblies.map(assembly => this._connectAssembly(assembly))
}),
// If something went wrong before any Assemblies could be created,
// clear all processing state.
(err) => {
fileIDs.forEach((fileID) => {
const file = this.uppy.getFile(fileID)
this.uppy.emit('preprocess-complete', file)
this.uppy.emit('upload-error', file, err)
})
throw err
}
)
}
_afterUpload (fileIDs, uploadID) {
const files = fileIDs.map(fileID => this.uppy.getFile(fileID))
// Only use files without errors
fileIDs = files.filter((file) => !file.error).map(file => file.id)
const state = this.getPluginState()
// If we're still restoring state, wait for that to be done.
if (this.restored) {
return this.restored.then(() => {
return this._afterUpload(fileIDs, uploadID)
})
}
const assemblyIDs = state.uploadsAssemblies[uploadID]
// If we don't have to wait for encoding metadata or results, we can close
// the socket immediately and finish the upload.
if (!this._shouldWaitAfterUpload()) {
assemblyIDs.forEach((assemblyID) => {
const assembly = this.activeAssemblies[assemblyID]
assembly.close()
delete this.activeAssemblies[assemblyID]
})
const assemblies = assemblyIDs.map((id) => this.getAssembly(id))
this.uppy.addResultData(uploadID, { transloadit: assemblies })
return Promise.resolve()
}
// If no Assemblies were created for this upload, we also do not have to wait.
// There's also no sockets or anything to close, so just return immediately.
if (assemblyIDs.length === 0) {
this.uppy.addResultData(uploadID, { transloadit: [] })
return Promise.resolve()
}
const incompleteFiles = files.filter(file => !hasProperty(this.completedFiles, file.id))
incompleteFiles.forEach((file) => {
this.uppy.emit('postprocess-progress', file, {
mode: 'indeterminate',
message: this.i18n('encoding')
})
})
const watcher = this.assemblyWatchers[uploadID]
return watcher.promise.then(() => {
const assemblies = assemblyIDs.map((id) => this.getAssembly(id))
// Remove the Assembly ID list for this upload,
// it's no longer going to be used anywhere.
const state = this.getPluginState()
const uploadsAssemblies = { ...state.uploadsAssemblies }
delete uploadsAssemblies[uploadID]
this.setPluginState({ uploadsAssemblies })
this.uppy.addResultData(uploadID, {
transloadit: assemblies
})
})
}
_onError (err = null, uploadID) {
const state = this.getPluginState()
const assemblyIDs = state.uploadsAssemblies[uploadID]
assemblyIDs.forEach((assemblyID) => {
if (this.activeAssemblies[assemblyID]) {
this.activeAssemblies[assemblyID].close()
}
})
}
_onTusError (err) {
if (err && /^tus: /.test(err.message)) {
const url = err.originalRequest && err.originalRequest.responseURL
? err.originalRequest.responseURL
: null
this.client.submitError(err, { url, type: 'TUS_ERROR' }).then((_) => {
// if we can't report the error that sucks
})
}
}
install () {
this.uppy.addPreProcessor(this._prepareUpload)
this.uppy.addPostProcessor(this._afterUpload)
// We may need to close socket.io connections on error.
this.uppy.on('error', this._onError)
// Handle cancellation.
this.uppy.on('cancel-all', this._onCancelAll)
// For error reporting.
this.uppy.on('upload-error', this._onTusError)
if (this.opts.importFromUploadURLs) {
// No uploader needed when importing; instead we take the upload URL from an existing uploader.
this.uppy.on('upload-success', this._onFileUploadURLAvailable)
} else {
this.uppy.use(Tus, {
// Disable tus-js-client fingerprinting, otherwise uploading the same file at different times
// will upload to an outdated Assembly, and we won't get socket events for it.
//
// To resume a Transloadit upload, we need to reconnect to the websocket, and the state that's
// required to do that is not saved by tus-js-client's fingerprinting. We need the tus URL,
// the Assembly URL, and the WebSocket URL, at least. We also need to know _all_ the files that
// were added to the Assembly, so we can properly complete it. All that state is handled by
// Golden Retriever. So, Golden Retriever is required to do resumability with the Transloadit plugin,
// and we disable Tus's default resume implementation to prevent bad behaviours.
resume: false,
// Disable Companion's retry optimisation; we need to change the endpoint on retry
// so it can't just reuse the same tus.Upload instance server-side.
useFastRemoteRetry: false,
// Only send Assembly metadata to the tus endpoint.
metaFields: ['assembly_url', 'filename', 'fieldname'],
// Pass the limit option to @uppy/tus
limit: this.opts.limit
})
}
this.uppy.on('restore:get-data', this._getPersistentData)
this.uppy.on('restored', this._onRestored)
this.setPluginState({
// Contains Assembly status objects, indexed by their ID.
assemblies: {},
// Contains arrays of Assembly IDs, indexed by the upload ID that they belong to.
uploadsAssemblies: {},
// Contains file data from Transloadit, indexed by their Transloadit-assigned ID.
files: {},
// Contains result data from Transloadit.
results: []
})
// We cannot cancel individual files because Assemblies tend to contain many files.
const { capabilities } = this.uppy.getState()
this.uppy.setState({
capabilities: {
...capabilities,
individualCancellation: false
}
})
}
uninstall () {
this.uppy.removePreProcessor(this._prepareUpload)
this.uppy.removePostProcessor(this._afterUpload)
this.uppy.off('error', this._onError)
if (this.opts.importFromUploadURLs) {
this.uppy.off('upload-success', this._onFileUploadURLAvailable)
}
const { capabilities } = this.uppy.getState()
this.uppy.setState({
capabilities: {
...capabilities,
individualCancellation: true
}
})
}
getAssembly (id) {
const { assemblies } = this.getPluginState()
return assemblies[id]
}
getAssemblyFiles (assemblyID) {
return this.uppy.getFiles().filter((file) => {
return file && file.transloadit && file.transloadit.assembly === assemblyID
})
}
}
module.exports.COMPANION = COMPANION
module.exports.UPPY_SERVER = COMPANION
module.exports.COMPANION_PATTERN = ALLOWED_COMPANION_PATTERN
| 1 | 13,070 | is there any reason we are not adding this to companion as well? There are no CORS concerns there, so adding probably shouldn't be a problem, no? **One question about the requestId;** Are the request Ids unique to each tus-js-client request, or are they unique to an upload instance instead? | transloadit-uppy | js |
@@ -76,6 +76,8 @@ public final class CsrfConfigurer<H extends HttpSecurityBuilder<H>> extends Abst
private CsrfTokenRepository csrfTokenRepository = new HttpSessionCsrfTokenRepository();
private RequestMatcher requireCsrfProtectionMatcher = CsrfFilter.DEFAULT_CSRF_MATCHER;
private List<RequestMatcher> ignoredCsrfProtectionMatchers = new ArrayList<RequestMatcher>();
+ private String cookieName;
+ private String cookiePath;
/**
* Creates a new instance | 1 | /*
* Copyright 2002-2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.config.annotation.web.configurers;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import javax.servlet.http.HttpServletRequest;
import org.springframework.security.access.AccessDeniedException;
import org.springframework.security.config.annotation.web.AbstractRequestMatcherRegistry;
import org.springframework.security.config.annotation.web.HttpSecurityBuilder;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.web.access.AccessDeniedHandler;
import org.springframework.security.web.access.AccessDeniedHandlerImpl;
import org.springframework.security.web.access.DelegatingAccessDeniedHandler;
import org.springframework.security.web.csrf.CsrfAuthenticationStrategy;
import org.springframework.security.web.csrf.CsrfFilter;
import org.springframework.security.web.csrf.CsrfLogoutHandler;
import org.springframework.security.web.csrf.CsrfTokenRepository;
import org.springframework.security.web.csrf.HttpSessionCsrfTokenRepository;
import org.springframework.security.web.csrf.MissingCsrfTokenException;
import org.springframework.security.web.session.InvalidSessionAccessDeniedHandler;
import org.springframework.security.web.session.InvalidSessionStrategy;
import org.springframework.security.web.util.matcher.AndRequestMatcher;
import org.springframework.security.web.util.matcher.NegatedRequestMatcher;
import org.springframework.security.web.util.matcher.OrRequestMatcher;
import org.springframework.security.web.util.matcher.RequestMatcher;
import org.springframework.util.Assert;
/**
* Adds <a
* href="https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)"
* >CSRF</a> protection for the methods as specified by
* {@link #requireCsrfProtectionMatcher(RequestMatcher)}.
*
* <h2>Security Filters</h2>
*
* The following Filters are populated
*
* <ul>
* <li>{@link CsrfFilter}</li>
* </ul>
*
* <h2>Shared Objects Created</h2>
*
* No shared objects are created.
*
* <h2>Shared Objects Used</h2>
*
* <ul>
* <li>
* {@link ExceptionHandlingConfigurer#accessDeniedHandler(AccessDeniedHandler)}
* is used to determine how to handle CSRF attempts</li>
* <li>{@link InvalidSessionStrategy}</li>
* </ul>
*
* @author Rob Winch
* @since 3.2
*/
public final class CsrfConfigurer<H extends HttpSecurityBuilder<H>> extends AbstractHttpConfigurer<CsrfConfigurer<H>,H> {
private CsrfTokenRepository csrfTokenRepository = new HttpSessionCsrfTokenRepository();
private RequestMatcher requireCsrfProtectionMatcher = CsrfFilter.DEFAULT_CSRF_MATCHER;
private List<RequestMatcher> ignoredCsrfProtectionMatchers = new ArrayList<RequestMatcher>();
/**
* Creates a new instance
* @see HttpSecurity#csrf()
*/
public CsrfConfigurer() {
}
/**
* Specify the {@link CsrfTokenRepository} to use. The default is an {@link HttpSessionCsrfTokenRepository}.
*
* @param csrfTokenRepository the {@link CsrfTokenRepository} to use
* @return the {@link CsrfConfigurer} for further customizations
*/
public CsrfConfigurer<H> csrfTokenRepository(CsrfTokenRepository csrfTokenRepository) {
Assert.notNull(csrfTokenRepository, "csrfTokenRepository cannot be null");
this.csrfTokenRepository = csrfTokenRepository;
return this;
}
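	// Illustration only (not generated by this configurer): a typical customization plugs in a
	// different repository implementation, for example
	//
	//   http
	//       .csrf()
	//           .csrfTokenRepository(cookieBackedRepository)
	//           .and()
	//       ...
	//
	// where cookieBackedRepository is any CsrfTokenRepository implementation; the variable
	// name here is hypothetical.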
/**
* Specify the {@link RequestMatcher} to use for determining when CSRF
* should be applied. The default is to ignore GET, HEAD, TRACE, OPTIONS and
* process all other requests.
*
* @param requireCsrfProtectionMatcher
* the {@link RequestMatcher} to use
* @return the {@link CsrfConfigurer} for further customizations
*/
public CsrfConfigurer<H> requireCsrfProtectionMatcher(RequestMatcher requireCsrfProtectionMatcher) {
Assert.notNull(requireCsrfProtectionMatcher, "requireCsrfProtectionMatcher cannot be null");
this.requireCsrfProtectionMatcher = requireCsrfProtectionMatcher;
return this;
}
/**
* <p>
	 * Allows specifying {@link HttpServletRequest}s that should not use CSRF Protection even if they match the {@link #requireCsrfProtectionMatcher(RequestMatcher)}.
* </p>
*
* <p>
* The following will ensure CSRF protection ignores:
* </p>
* <ul>
* <li>Any GET, HEAD, TRACE, OPTIONS (this is the default)</li>
* <li>We also explicitly state to ignore any request that starts with "/sockjs/"</li>
* </ul>
*
* <pre>
* http
* .csrf()
* .ignoringAntMatchers("/sockjs/**")
* .and()
* ...
* </pre>
*
* @since 4.0
*/
public CsrfConfigurer<H> ignoringAntMatchers(String... antPatterns) {
return new IgnoreCsrfProtectionRegistry().antMatchers(antPatterns).and();
}
@SuppressWarnings("unchecked")
@Override
public void configure(H http) throws Exception {
CsrfFilter filter = new CsrfFilter(csrfTokenRepository);
RequestMatcher requireCsrfProtectionMatcher = getRequireCsrfProtectionMatcher();
if(requireCsrfProtectionMatcher != null) {
filter.setRequireCsrfProtectionMatcher(requireCsrfProtectionMatcher);
}
AccessDeniedHandler accessDeniedHandler = createAccessDeniedHandler(http);
if(accessDeniedHandler != null) {
filter.setAccessDeniedHandler(accessDeniedHandler);
}
LogoutConfigurer<H> logoutConfigurer = http.getConfigurer(LogoutConfigurer.class);
if(logoutConfigurer != null) {
logoutConfigurer.addLogoutHandler(new CsrfLogoutHandler(csrfTokenRepository));
}
SessionManagementConfigurer<H> sessionConfigurer = http.getConfigurer(SessionManagementConfigurer.class);
if(sessionConfigurer != null) {
sessionConfigurer.addSessionAuthenticationStrategy(new CsrfAuthenticationStrategy(csrfTokenRepository));
}
filter = postProcess(filter);
http.addFilter(filter);
}
/**
	 * Gets the final {@link RequestMatcher} to use by combining the {@link #requireCsrfProtectionMatcher(RequestMatcher)} and any {@link #ignoringAntMatchers(String...)}.
*
* @return the {@link RequestMatcher} to use
*/
private RequestMatcher getRequireCsrfProtectionMatcher() {
if(ignoredCsrfProtectionMatchers.isEmpty()) {
return requireCsrfProtectionMatcher;
}
return new AndRequestMatcher(requireCsrfProtectionMatcher, new NegatedRequestMatcher(new OrRequestMatcher(ignoredCsrfProtectionMatchers)));
}
/**
* Gets the default {@link AccessDeniedHandler} from the
* {@link ExceptionHandlingConfigurer#getAccessDeniedHandler()} or create a
* {@link AccessDeniedHandlerImpl} if not available.
*
* @param http the {@link HttpSecurityBuilder}
* @return the {@link AccessDeniedHandler}
*/
@SuppressWarnings("unchecked")
private AccessDeniedHandler getDefaultAccessDeniedHandler(H http) {
ExceptionHandlingConfigurer<H> exceptionConfig = http.getConfigurer(ExceptionHandlingConfigurer.class);
AccessDeniedHandler handler = null;
if(exceptionConfig != null) {
handler = exceptionConfig.getAccessDeniedHandler();
}
if(handler == null) {
handler = new AccessDeniedHandlerImpl();
}
return handler;
}
/**
* Gets the default {@link InvalidSessionStrategy} from the
* {@link SessionManagementConfigurer#getInvalidSessionStrategy()} or null
* if not available.
*
* @param http
* the {@link HttpSecurityBuilder}
* @return the {@link InvalidSessionStrategy}
*/
@SuppressWarnings("unchecked")
private InvalidSessionStrategy getInvalidSessionStrategy(H http) {
SessionManagementConfigurer<H> sessionManagement = http.getConfigurer(SessionManagementConfigurer.class);
if(sessionManagement == null) {
return null;
}
return sessionManagement.getInvalidSessionStrategy();
}
/**
* Creates the {@link AccessDeniedHandler} from the result of
* {@link #getDefaultAccessDeniedHandler(HttpSecurityBuilder)} and
* {@link #getInvalidSessionStrategy(HttpSecurityBuilder)}. If
* {@link #getInvalidSessionStrategy(HttpSecurityBuilder)} is non-null, then
* a {@link DelegatingAccessDeniedHandler} is used in combination with
* {@link InvalidSessionAccessDeniedHandler} and the
* {@link #getDefaultAccessDeniedHandler(HttpSecurityBuilder)}. Otherwise,
* only {@link #getDefaultAccessDeniedHandler(HttpSecurityBuilder)} is used.
*
* @param http the {@link HttpSecurityBuilder}
* @return the {@link AccessDeniedHandler}
*/
private AccessDeniedHandler createAccessDeniedHandler(H http) {
InvalidSessionStrategy invalidSessionStrategy = getInvalidSessionStrategy(http);
AccessDeniedHandler defaultAccessDeniedHandler = getDefaultAccessDeniedHandler(http);
if(invalidSessionStrategy == null) {
return defaultAccessDeniedHandler;
}
InvalidSessionAccessDeniedHandler invalidSessionDeniedHandler = new InvalidSessionAccessDeniedHandler(invalidSessionStrategy);
LinkedHashMap<Class<? extends AccessDeniedException>, AccessDeniedHandler> handlers =
new LinkedHashMap<Class<? extends AccessDeniedException>, AccessDeniedHandler>();
handlers.put(MissingCsrfTokenException.class, invalidSessionDeniedHandler);
return new DelegatingAccessDeniedHandler(handlers, defaultAccessDeniedHandler);
}
/**
* Allows registering {@link RequestMatcher} instances that should be
* ignored (even if the {@link HttpServletRequest} matches the
	 * {@link CsrfConfigurer#requireCsrfProtectionMatcher(RequestMatcher)}).
*
* @author Rob Winch
* @since 4.0
*/
private class IgnoreCsrfProtectionRegistry extends AbstractRequestMatcherRegistry<IgnoreCsrfProtectionRegistry>{
public CsrfConfigurer<H> and() {
return CsrfConfigurer.this;
}
protected IgnoreCsrfProtectionRegistry chainRequestMatchers(
List<RequestMatcher> requestMatchers) {
ignoredCsrfProtectionMatchers.addAll(requestMatchers);
return this;
}
}
} | 1 | 8,840 | The formatting is a little off here. | spring-projects-spring-security | java |
@@ -263,7 +263,7 @@ public class TestCustomFunctions extends LuceneTestCase {
PrintWriter pw = new PrintWriter(sw);
expected.printStackTrace(pw);
pw.flush();
- assertTrue(sw.toString().contains("JavascriptCompiler$CompiledExpression.evaluate(" + source + ")"));
+ assertTrue(sw.toString(), sw.toString().contains("JavascriptCompiler$CompiledExpression.evaluate(" + source + ")"));
}
/** test that namespaces work with custom expressions. */ | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.expressions.js;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.lang.reflect.Method;
import java.text.ParseException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.expressions.Expression;
import org.apache.lucene.util.LuceneTestCase;
import org.objectweb.asm.ClassWriter;
import org.objectweb.asm.Opcodes;
import org.objectweb.asm.Type;
import org.objectweb.asm.commons.GeneratorAdapter;
/** Tests customizing the function map */
public class TestCustomFunctions extends LuceneTestCase {
private static double DELTA = 0.0000001;
/** empty list of methods */
public void testEmpty() throws Exception {
Map<String,Method> functions = Collections.emptyMap();
ParseException expected = expectThrows(ParseException.class, () -> {
JavascriptCompiler.compile("sqrt(20)", functions, getClass().getClassLoader());
});
assertEquals("Invalid expression 'sqrt(20)': Unrecognized function call (sqrt).", expected.getMessage());
assertEquals(expected.getErrorOffset(), 0);
}
/** using the default map explicitly */
public void testDefaultList() throws Exception {
Map<String,Method> functions = JavascriptCompiler.DEFAULT_FUNCTIONS;
Expression expr = JavascriptCompiler.compile("sqrt(20)", functions, getClass().getClassLoader());
assertEquals(Math.sqrt(20), expr.evaluate(null), DELTA);
}
public static double zeroArgMethod() { return 5; }
/** tests a method with no arguments */
public void testNoArgMethod() throws Exception {
Map<String,Method> functions = new HashMap<>();
functions.put("foo", getClass().getMethod("zeroArgMethod"));
Expression expr = JavascriptCompiler.compile("foo()", functions, getClass().getClassLoader());
assertEquals(5, expr.evaluate(null), DELTA);
}
public static double oneArgMethod(double arg1) { return 3 + arg1; }
  /** tests a method with one argument */
public void testOneArgMethod() throws Exception {
Map<String,Method> functions = new HashMap<>();
functions.put("foo", getClass().getMethod("oneArgMethod", double.class));
Expression expr = JavascriptCompiler.compile("foo(3)", functions, getClass().getClassLoader());
assertEquals(6, expr.evaluate(null), DELTA);
}
public static double threeArgMethod(double arg1, double arg2, double arg3) { return arg1 + arg2 + arg3; }
/** tests a method with three arguments */
public void testThreeArgMethod() throws Exception {
Map<String,Method> functions = new HashMap<>();
functions.put("foo", getClass().getMethod("threeArgMethod", double.class, double.class, double.class));
Expression expr = JavascriptCompiler.compile("foo(3, 4, 5)", functions, getClass().getClassLoader());
assertEquals(12, expr.evaluate(null), DELTA);
}
/** tests a map with 2 functions */
public void testTwoMethods() throws Exception {
Map<String,Method> functions = new HashMap<>();
functions.put("foo", getClass().getMethod("zeroArgMethod"));
functions.put("bar", getClass().getMethod("oneArgMethod", double.class));
Expression expr = JavascriptCompiler.compile("foo() + bar(3)", functions, getClass().getClassLoader());
assertEquals(11, expr.evaluate(null), DELTA);
}
  /** tests invalid method calls that are not allowed to be mapped as variables */
public void testInvalidVariableMethods() {
ParseException expected = expectThrows(ParseException.class, () -> {
JavascriptCompiler.compile("method()");
});
assertEquals("Invalid expression 'method()': Unrecognized function call (method).", expected.getMessage());
assertEquals(0, expected.getErrorOffset());
expected = expectThrows(ParseException.class, () -> {
JavascriptCompiler.compile("method.method(1)");
});
assertEquals("Invalid expression 'method.method(1)': Unrecognized function call (method.method).", expected.getMessage());
assertEquals(0, expected.getErrorOffset());
expected = expectThrows(ParseException.class, () -> {
JavascriptCompiler.compile("1 + method()");
});
assertEquals("Invalid expression '1 + method()': Unrecognized function call (method).", expected.getMessage());
assertEquals(4, expected.getErrorOffset());
}
public static String bogusReturnType() { return "bogus!"; }
/** wrong return type: must be double */
public void testWrongReturnType() throws Exception {
Map<String,Method> functions = new HashMap<>();
functions.put("foo", getClass().getMethod("bogusReturnType"));
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
JavascriptCompiler.compile("foo()", functions, getClass().getClassLoader());
});
assertTrue(expected.getMessage().contains("does not return a double"));
}
public static double bogusParameterType(String s) { return 0; }
/** wrong param type: must be doubles */
public void testWrongParameterType() throws Exception {
Map<String,Method> functions = new HashMap<>();
functions.put("foo", getClass().getMethod("bogusParameterType", String.class));
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
JavascriptCompiler.compile("foo(2)", functions, getClass().getClassLoader());
});
assertTrue(expected.getMessage().contains("must take only double parameters"));
}
public double nonStaticMethod() { return 0; }
/** wrong modifiers: must be static */
public void testWrongNotStatic() throws Exception {
Map<String,Method> functions = new HashMap<>();
functions.put("foo", getClass().getMethod("nonStaticMethod"));
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
JavascriptCompiler.compile("foo()", functions, getClass().getClassLoader());
});
assertTrue(expected.getMessage().contains("is not static"));
}
static double nonPublicMethod() { return 0; }
/** wrong modifiers: must be public */
public void testWrongNotPublic() throws Exception {
Map<String,Method> functions = new HashMap<>();
functions.put("foo", getClass().getDeclaredMethod("nonPublicMethod"));
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
JavascriptCompiler.compile("foo()", functions, getClass().getClassLoader());
});
assertTrue(expected.getMessage().contains("not public"));
}
static class NestedNotPublic {
public static double method() { return 0; }
}
/** wrong class modifiers: class containing method is not public */
public void testWrongNestedNotPublic() throws Exception {
Map<String,Method> functions = new HashMap<>();
functions.put("foo", NestedNotPublic.class.getMethod("method"));
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
JavascriptCompiler.compile("foo()", functions, getClass().getClassLoader());
});
assertTrue(expected.getMessage().contains("not public"));
}
/** Classloader that can be used to create a fake static class that has one method returning a static var */
static final class Loader extends ClassLoader implements Opcodes {
Loader(ClassLoader parent) {
super(parent);
}
public Class<?> createFakeClass() {
String className = TestCustomFunctions.class.getName() + "$Foo";
ClassWriter classWriter = new ClassWriter(ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS);
classWriter.visit(Opcodes.V1_5, ACC_PUBLIC | ACC_SUPER | ACC_FINAL | ACC_SYNTHETIC,
className.replace('.', '/'), null, Type.getInternalName(Object.class), null);
org.objectweb.asm.commons.Method m = org.objectweb.asm.commons.Method.getMethod("void <init>()");
GeneratorAdapter constructor = new GeneratorAdapter(ACC_PRIVATE | ACC_SYNTHETIC, m, null, null, classWriter);
constructor.loadThis();
constructor.loadArgs();
constructor.invokeConstructor(Type.getType(Object.class), m);
constructor.returnValue();
constructor.endMethod();
GeneratorAdapter gen = new GeneratorAdapter(ACC_STATIC | ACC_PUBLIC | ACC_SYNTHETIC,
org.objectweb.asm.commons.Method.getMethod("double bar()"), null, null, classWriter);
gen.push(2.0);
gen.returnValue();
gen.endMethod();
byte[] bc = classWriter.toByteArray();
return defineClass(className, bc, 0, bc.length);
}
}
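  // The bytecode produced above corresponds roughly to this Java source (synthetic and
  // access flags omitted):
  //
  //   public final class TestCustomFunctions$Foo {
  //     private Foo() {}
  //     public static double bar() { return 2.0; }
  //   }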
/** uses this test with a different classloader and tries to
* register it using the default classloader, which should fail */
public void testClassLoader() throws Exception {
ClassLoader thisLoader = getClass().getClassLoader();
Loader childLoader = new Loader(thisLoader);
Class<?> fooClass = childLoader.createFakeClass();
Method barMethod = fooClass.getMethod("bar");
Map<String,Method> functions = Collections.singletonMap("bar", barMethod);
assertNotSame(thisLoader, fooClass.getClassLoader());
assertNotSame(thisLoader, barMethod.getDeclaringClass().getClassLoader());
// this should pass:
Expression expr = JavascriptCompiler.compile("bar()", functions, childLoader);
assertEquals(2.0, expr.evaluate(null), DELTA);
// use our classloader, not the foreign one, which should fail!
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
JavascriptCompiler.compile("bar()", functions, thisLoader);
});
assertTrue(expected.getMessage().contains("is not declared by a class which is accessible by the given parent ClassLoader"));
// mix foreign and default functions
Map<String,Method> mixedFunctions = new HashMap<>(JavascriptCompiler.DEFAULT_FUNCTIONS);
mixedFunctions.putAll(functions);
expr = JavascriptCompiler.compile("bar()", mixedFunctions, childLoader);
assertEquals(2.0, expr.evaluate(null), DELTA);
expr = JavascriptCompiler.compile("sqrt(20)", mixedFunctions, childLoader);
assertEquals(Math.sqrt(20), expr.evaluate(null), DELTA);
// use our classloader, not the foreign one, which should fail!
expected = expectThrows(IllegalArgumentException.class, () -> {
JavascriptCompiler.compile("bar()", mixedFunctions, thisLoader);
});
assertTrue(expected.getMessage().contains("is not declared by a class which is accessible by the given parent ClassLoader"));
}
static String MESSAGE = "This should not happen but it happens";
public static class StaticThrowingException {
public static double method() { throw new ArithmeticException(MESSAGE); }
}
  /** the method throws an exception. We check that the stack trace contains the source code of the expression as the file name. */
public void testThrowingException() throws Exception {
Map<String,Method> functions = new HashMap<>();
functions.put("foo", StaticThrowingException.class.getMethod("method"));
String source = "3 * foo() / 5";
Expression expr = JavascriptCompiler.compile(source, functions, getClass().getClassLoader());
ArithmeticException expected = expectThrows(ArithmeticException.class, () -> {
expr.evaluate(null);
});
assertEquals(MESSAGE, expected.getMessage());
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
expected.printStackTrace(pw);
pw.flush();
assertTrue(sw.toString().contains("JavascriptCompiler$CompiledExpression.evaluate(" + source + ")"));
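    // A hypothetical AssertJ-style equivalent of the line above (AssertJ is not imported by
    // this test; shown only as a sketch):
    //   assertThat(sw.toString())
    //       .contains("JavascriptCompiler$CompiledExpression.evaluate(" + source + ")");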
}
/** test that namespaces work with custom expressions. */
public void testNamespaces() throws Exception {
Map<String, Method> functions = new HashMap<>();
functions.put("foo.bar", getClass().getMethod("zeroArgMethod"));
String source = "foo.bar()";
Expression expr = JavascriptCompiler.compile(source, functions, getClass().getClassLoader());
assertEquals(5, expr.evaluate(null), DELTA);
}
}
| 1 | 36,869 | Perhaps we should import assertj for tests. These assertions are so much cleaner with assertj. Don't know whether hamcrest equivalent exist (maybe it does). | apache-lucene-solr | java |
@@ -2449,16 +2449,9 @@ void ProtocolGame::sendAddCreature(const Creature* creature, const Position& pos
sendMagicEffect(pos, CONST_ME_TELEPORT);
}
- sendInventoryItem(CONST_SLOT_HEAD, player->getInventoryItem(CONST_SLOT_HEAD));
- sendInventoryItem(CONST_SLOT_NECKLACE, player->getInventoryItem(CONST_SLOT_NECKLACE));
- sendInventoryItem(CONST_SLOT_BACKPACK, player->getInventoryItem(CONST_SLOT_BACKPACK));
- sendInventoryItem(CONST_SLOT_ARMOR, player->getInventoryItem(CONST_SLOT_ARMOR));
- sendInventoryItem(CONST_SLOT_RIGHT, player->getInventoryItem(CONST_SLOT_RIGHT));
- sendInventoryItem(CONST_SLOT_LEFT, player->getInventoryItem(CONST_SLOT_LEFT));
- sendInventoryItem(CONST_SLOT_LEGS, player->getInventoryItem(CONST_SLOT_LEGS));
- sendInventoryItem(CONST_SLOT_FEET, player->getInventoryItem(CONST_SLOT_FEET));
- sendInventoryItem(CONST_SLOT_RING, player->getInventoryItem(CONST_SLOT_RING));
- sendInventoryItem(CONST_SLOT_AMMO, player->getInventoryItem(CONST_SLOT_AMMO));
+ for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
+ sendInventoryItem((slots_t)i, player->getInventoryItem((slots_t)i));
+ }
sendStats();
sendSkills(); | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2018 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include <boost/range/adaptor/reversed.hpp>
#include "protocolgame.h"
#include "outputmessage.h"
#include "player.h"
#include "configmanager.h"
#include "actions.h"
#include "game.h"
#include "iologindata.h"
#include "iomarket.h"
#include "waitlist.h"
#include "ban.h"
#include "scheduler.h"
extern ConfigManager g_config;
extern Actions actions;
extern CreatureEvents* g_creatureEvents;
extern Chat* g_chat;
void ProtocolGame::release()
{
//dispatcher thread
if (player && player->client == shared_from_this()) {
player->client.reset();
player->decrementReferenceCounter();
player = nullptr;
}
OutputMessagePool::getInstance().removeProtocolFromAutosend(shared_from_this());
Protocol::release();
}
void ProtocolGame::login(const std::string& name, uint32_t accountId, OperatingSystem_t operatingSystem)
{
//dispatcher thread
Player* foundPlayer = g_game.getPlayerByName(name);
if (!foundPlayer || g_config.getBoolean(ConfigManager::ALLOW_CLONES)) {
player = new Player(getThis());
player->setName(name);
player->incrementReferenceCounter();
player->setID();
if (!IOLoginData::preloadPlayer(player, name)) {
disconnectClient("Your character could not be loaded.");
return;
}
if (IOBan::isPlayerNamelocked(player->getGUID())) {
disconnectClient("Your character has been namelocked.");
return;
}
if (g_game.getGameState() == GAME_STATE_CLOSING && !player->hasFlag(PlayerFlag_CanAlwaysLogin)) {
disconnectClient("The game is just going down.\nPlease try again later.");
return;
}
if (g_game.getGameState() == GAME_STATE_CLOSED && !player->hasFlag(PlayerFlag_CanAlwaysLogin)) {
disconnectClient("Server is currently closed.\nPlease try again later.");
return;
}
if (g_config.getBoolean(ConfigManager::ONE_PLAYER_ON_ACCOUNT) && player->getAccountType() < ACCOUNT_TYPE_GAMEMASTER && g_game.getPlayerByAccount(player->getAccount())) {
disconnectClient("You may only login with one character\nof your account at the same time.");
return;
}
if (!player->hasFlag(PlayerFlag_CannotBeBanned)) {
BanInfo banInfo;
if (IOBan::isAccountBanned(accountId, banInfo)) {
if (banInfo.reason.empty()) {
banInfo.reason = "(none)";
}
std::ostringstream ss;
if (banInfo.expiresAt > 0) {
ss << "Your account has been banned until " << formatDateShort(banInfo.expiresAt) << " by " << banInfo.bannedBy << ".\n\nReason specified:\n" << banInfo.reason;
} else {
ss << "Your account has been permanently banned by " << banInfo.bannedBy << ".\n\nReason specified:\n" << banInfo.reason;
}
disconnectClient(ss.str());
return;
}
}
WaitingList& waitingList = WaitingList::getInstance();
if (!waitingList.clientLogin(player)) {
uint32_t currentSlot = waitingList.getClientSlot(player);
uint32_t retryTime = WaitingList::getTime(currentSlot);
std::ostringstream ss;
ss << "Too many players online.\nYou are at place "
<< currentSlot << " on the waiting list.";
auto output = OutputMessagePool::getOutputMessage();
output->addByte(0x16);
output->addString(ss.str());
output->addByte(retryTime);
send(output);
disconnect();
return;
}
if (!IOLoginData::loadPlayerById(player, player->getGUID())) {
disconnectClient("Your character could not be loaded.");
return;
}
player->setOperatingSystem(operatingSystem);
if (!g_game.placeCreature(player, player->getLoginPosition())) {
if (!g_game.placeCreature(player, player->getTemplePosition(), false, true)) {
disconnectClient("Temple position is wrong. Contact the administrator.");
return;
}
}
if (operatingSystem >= CLIENTOS_OTCLIENT_LINUX) {
player->registerCreatureEvent("ExtendedOpcode");
}
player->lastIP = player->getIP();
player->lastLoginSaved = std::max<time_t>(time(nullptr), player->lastLoginSaved + 1);
acceptPackets = true;
} else {
if (eventConnect != 0 || !g_config.getBoolean(ConfigManager::REPLACE_KICK_ON_LOGIN)) {
//Already trying to connect
disconnectClient("You are already logged in.");
return;
}
if (foundPlayer->client) {
foundPlayer->disconnect();
foundPlayer->isConnecting = true;
eventConnect = g_scheduler.addEvent(createSchedulerTask(1000, std::bind(&ProtocolGame::connect, getThis(), foundPlayer->getID(), operatingSystem)));
} else {
connect(foundPlayer->getID(), operatingSystem);
}
}
OutputMessagePool::getInstance().addProtocolToAutosend(shared_from_this());
}
void ProtocolGame::connect(uint32_t playerId, OperatingSystem_t operatingSystem)
{
eventConnect = 0;
Player* foundPlayer = g_game.getPlayerByID(playerId);
if (!foundPlayer || foundPlayer->client) {
disconnectClient("You are already logged in.");
return;
}
if (isConnectionExpired()) {
//ProtocolGame::release() has been called at this point and the Connection object
//no longer exists, so we return to prevent leakage of the Player.
return;
}
player = foundPlayer;
player->incrementReferenceCounter();
g_chat->removeUserFromAllChannels(*player);
player->clearModalWindows();
player->setOperatingSystem(operatingSystem);
player->isConnecting = false;
player->client = getThis();
sendAddCreature(player, player->getPosition(), 0, false);
player->lastIP = player->getIP();
player->lastLoginSaved = std::max<time_t>(time(nullptr), player->lastLoginSaved + 1);
acceptPackets = true;
}
void ProtocolGame::logout(bool displayEffect, bool forced)
{
//dispatcher thread
if (!player) {
return;
}
if (!player->isRemoved()) {
if (!forced) {
if (!player->isAccessPlayer()) {
if (player->getTile()->hasFlag(TILESTATE_NOLOGOUT)) {
player->sendCancelMessage(RETURNVALUE_YOUCANNOTLOGOUTHERE);
return;
}
if (!player->getTile()->hasFlag(TILESTATE_PROTECTIONZONE) && player->hasCondition(CONDITION_INFIGHT)) {
player->sendCancelMessage(RETURNVALUE_YOUMAYNOTLOGOUTDURINGAFIGHT);
return;
}
}
//scripting event - onLogout
if (!g_creatureEvents->playerLogout(player)) {
//Let the script handle the error message
return;
}
}
if (displayEffect && player->getHealth() > 0) {
g_game.addMagicEffect(player->getPosition(), CONST_ME_POFF);
}
}
disconnect();
g_game.removeCreature(player);
}
void ProtocolGame::onRecvFirstMessage(NetworkMessage& msg)
{
if (g_game.getGameState() == GAME_STATE_SHUTDOWN) {
disconnect();
return;
}
OperatingSystem_t operatingSystem = static_cast<OperatingSystem_t>(msg.get<uint16_t>());
version = msg.get<uint16_t>();
msg.skipBytes(7); // U32 client version, U8 client type, U16 dat revision
if (!Protocol::RSA_decrypt(msg)) {
disconnect();
return;
}
xtea::key key;
key[0] = msg.get<uint32_t>();
key[1] = msg.get<uint32_t>();
key[2] = msg.get<uint32_t>();
key[3] = msg.get<uint32_t>();
enableXTEAEncryption();
setXTEAKey(std::move(key));
if (operatingSystem >= CLIENTOS_OTCLIENT_LINUX) {
NetworkMessage opcodeMessage;
opcodeMessage.addByte(0x32);
opcodeMessage.addByte(0x00);
opcodeMessage.add<uint16_t>(0x00);
writeToOutputBuffer(opcodeMessage);
}
msg.skipBytes(1); // gamemaster flag
std::string sessionKey = msg.getString();
auto sessionArgs = explodeString(sessionKey, "\n", 4);
if (sessionArgs.size() != 4) {
disconnect();
return;
}
std::string& accountName = sessionArgs[0];
std::string& password = sessionArgs[1];
std::string& token = sessionArgs[2];
uint32_t tokenTime = 0;
try {
tokenTime = std::stoul(sessionArgs[3]);
} catch (const std::invalid_argument&) {
disconnectClient("Malformed token packet.");
return;
} catch (const std::out_of_range&) {
disconnectClient("Token time is too long.");
return;
}
if (accountName.empty()) {
disconnectClient("You must enter your account name.");
return;
}
std::string characterName = msg.getString();
uint32_t timeStamp = msg.get<uint32_t>();
uint8_t randNumber = msg.getByte();
if (challengeTimestamp != timeStamp || challengeRandom != randNumber) {
disconnect();
return;
}
if (version < CLIENT_VERSION_MIN || version > CLIENT_VERSION_MAX) {
std::ostringstream ss;
ss << "Only clients with protocol " << CLIENT_VERSION_STR << " allowed!";
disconnectClient(ss.str());
return;
}
if (g_game.getGameState() == GAME_STATE_STARTUP) {
disconnectClient("Gameworld is starting up. Please wait.");
return;
}
if (g_game.getGameState() == GAME_STATE_MAINTAIN) {
disconnectClient("Gameworld is under maintenance. Please re-connect in a while.");
return;
}
BanInfo banInfo;
if (IOBan::isIpBanned(getIP(), banInfo)) {
if (banInfo.reason.empty()) {
banInfo.reason = "(none)";
}
std::ostringstream ss;
ss << "Your IP has been banned until " << formatDateShort(banInfo.expiresAt) << " by " << banInfo.bannedBy << ".\n\nReason specified:\n" << banInfo.reason;
disconnectClient(ss.str());
return;
}
uint32_t accountId = IOLoginData::gameworldAuthentication(accountName, password, characterName, token, tokenTime);
if (accountId == 0) {
disconnectClient("Account name or password is not correct.");
return;
}
g_dispatcher.addTask(createTask(std::bind(&ProtocolGame::login, getThis(), characterName, accountId, operatingSystem)));
}
void ProtocolGame::onConnect()
{
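	// send the 0x1F challenge packet: the client must echo the timestamp and random byte
	// in its first game packet, where onRecvFirstMessage verifies them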
auto output = OutputMessagePool::getOutputMessage();
static std::random_device rd;
static std::ranlux24 generator(rd());
static std::uniform_int_distribution<uint16_t> randNumber(0x00, 0xFF);
// Skip checksum
output->skipBytes(sizeof(uint32_t));
// Packet length & type
output->add<uint16_t>(0x0006);
output->addByte(0x1F);
// Add timestamp & random number
challengeTimestamp = static_cast<uint32_t>(time(nullptr));
output->add<uint32_t>(challengeTimestamp);
challengeRandom = randNumber(generator);
output->addByte(challengeRandom);
// Go back and write checksum
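	// rewind 12 bytes: 4 (checksum placeholder) + 2 (length) + 1 (type) + 4 (timestamp) + 1 (random),
	// then overwrite the placeholder with the Adler-32 checksum of the 8 payload bytes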
output->skipBytes(-12);
output->add<uint32_t>(adlerChecksum(output->getOutputBuffer() + sizeof(uint32_t), 8));
send(output);
}
void ProtocolGame::disconnectClient(const std::string& message) const
{
auto output = OutputMessagePool::getOutputMessage();
output->addByte(0x14);
output->addString(message);
send(output);
disconnect();
}
void ProtocolGame::writeToOutputBuffer(const NetworkMessage& msg)
{
auto out = getOutputBuffer(msg.getLength());
out->append(msg);
}
void ProtocolGame::parsePacket(NetworkMessage& msg)
{
if (!acceptPackets || g_game.getGameState() == GAME_STATE_SHUTDOWN || msg.getLength() <= 0) {
return;
}
uint8_t recvbyte = msg.getByte();
if (!player) {
if (recvbyte == 0x0F) {
disconnect();
}
return;
}
	//a dead or removed player cannot perform actions; only logout (0x14) is still handled,
	//while 0x0F (enter game) forces a disconnect
if (player->isRemoved() || player->getHealth() <= 0) {
if (recvbyte == 0x0F) {
disconnect();
return;
}
if (recvbyte != 0x14) {
return;
}
}
switch (recvbyte) {
case 0x14: g_dispatcher.addTask(createTask(std::bind(&ProtocolGame::logout, getThis(), true, false))); break;
case 0x1D: addGameTask(&Game::playerReceivePingBack, player->getID()); break;
case 0x1E: addGameTask(&Game::playerReceivePing, player->getID()); break;
case 0x32: parseExtendedOpcode(msg); break; //otclient extended opcode
case 0x64: parseAutoWalk(msg); break;
case 0x65: addGameTask(&Game::playerMove, player->getID(), DIRECTION_NORTH); break;
case 0x66: addGameTask(&Game::playerMove, player->getID(), DIRECTION_EAST); break;
case 0x67: addGameTask(&Game::playerMove, player->getID(), DIRECTION_SOUTH); break;
case 0x68: addGameTask(&Game::playerMove, player->getID(), DIRECTION_WEST); break;
case 0x69: addGameTask(&Game::playerStopAutoWalk, player->getID()); break;
case 0x6A: addGameTask(&Game::playerMove, player->getID(), DIRECTION_NORTHEAST); break;
case 0x6B: addGameTask(&Game::playerMove, player->getID(), DIRECTION_SOUTHEAST); break;
case 0x6C: addGameTask(&Game::playerMove, player->getID(), DIRECTION_SOUTHWEST); break;
case 0x6D: addGameTask(&Game::playerMove, player->getID(), DIRECTION_NORTHWEST); break;
case 0x6F: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_NORTH); break;
case 0x70: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_EAST); break;
case 0x71: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_SOUTH); break;
case 0x72: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerTurn, player->getID(), DIRECTION_WEST); break;
case 0x77: parseEquipObject(msg); break;
case 0x78: parseThrow(msg); break;
case 0x79: parseLookInShop(msg); break;
case 0x7A: parsePlayerPurchase(msg); break;
case 0x7B: parsePlayerSale(msg); break;
case 0x7C: addGameTask(&Game::playerCloseShop, player->getID()); break;
case 0x7D: parseRequestTrade(msg); break;
case 0x7E: parseLookInTrade(msg); break;
case 0x7F: addGameTask(&Game::playerAcceptTrade, player->getID()); break;
case 0x80: addGameTask(&Game::playerCloseTrade, player->getID()); break;
case 0x82: parseUseItem(msg); break;
case 0x83: parseUseItemEx(msg); break;
case 0x84: parseUseWithCreature(msg); break;
case 0x85: parseRotateItem(msg); break;
case 0x87: parseCloseContainer(msg); break;
case 0x88: parseUpArrowContainer(msg); break;
case 0x89: parseTextWindow(msg); break;
case 0x8A: parseHouseWindow(msg); break;
case 0x8C: parseLookAt(msg); break;
case 0x8D: parseLookInBattleList(msg); break;
case 0x8E: /* join aggression */ break;
case 0x96: parseSay(msg); break;
case 0x97: addGameTask(&Game::playerRequestChannels, player->getID()); break;
case 0x98: parseOpenChannel(msg); break;
case 0x99: parseCloseChannel(msg); break;
case 0x9A: parseOpenPrivateChannel(msg); break;
case 0x9E: addGameTask(&Game::playerCloseNpcChannel, player->getID()); break;
case 0xA0: parseFightModes(msg); break;
case 0xA1: parseAttack(msg); break;
case 0xA2: parseFollow(msg); break;
case 0xA3: parseInviteToParty(msg); break;
case 0xA4: parseJoinParty(msg); break;
case 0xA5: parseRevokePartyInvite(msg); break;
case 0xA6: parsePassPartyLeadership(msg); break;
case 0xA7: addGameTask(&Game::playerLeaveParty, player->getID()); break;
case 0xA8: parseEnableSharedPartyExperience(msg); break;
case 0xAA: addGameTask(&Game::playerCreatePrivateChannel, player->getID()); break;
case 0xAB: parseChannelInvite(msg); break;
case 0xAC: parseChannelExclude(msg); break;
case 0xBE: addGameTask(&Game::playerCancelAttackAndFollow, player->getID()); break;
case 0xC9: /* update tile */ break;
case 0xCA: parseUpdateContainer(msg); break;
case 0xCB: parseBrowseField(msg); break;
case 0xCC: parseSeekInContainer(msg); break;
case 0xD2: addGameTask(&Game::playerRequestOutfit, player->getID()); break;
case 0xD3: parseSetOutfit(msg); break;
case 0xD4: parseToggleMount(msg); break;
case 0xDC: parseAddVip(msg); break;
case 0xDD: parseRemoveVip(msg); break;
case 0xDE: parseEditVip(msg); break;
case 0xE6: parseBugReport(msg); break;
case 0xE7: /* thank you */ break;
case 0xE8: parseDebugAssert(msg); break;
case 0xF0: addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerShowQuestLog, player->getID()); break;
case 0xF1: parseQuestLine(msg); break;
case 0xF2: parseRuleViolationReport(msg); break;
case 0xF3: /* get object info */ break;
case 0xF4: parseMarketLeave(); break;
case 0xF5: parseMarketBrowse(msg); break;
case 0xF6: parseMarketCreateOffer(msg); break;
case 0xF7: parseMarketCancelOffer(msg); break;
case 0xF8: parseMarketAcceptOffer(msg); break;
case 0xF9: parseModalWindowAnswer(msg); break;
default:
// std::cout << "Player: " << player->getName() << " sent an unknown packet header: 0x" << std::hex << static_cast<uint16_t>(recvbyte) << std::dec << "!" << std::endl;
break;
}
if (msg.isOverrun()) {
disconnect();
}
}
void ProtocolGame::GetTileDescription(const Tile* tile, NetworkMessage& msg)
{
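	// a tile description holds at most 10 things, sent in order: ground, top items,
	// creatures (in reverse stacking order), then down items; on the player's own tile
	// one slot is reserved so the player creature itself is always included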
msg.add<uint16_t>(0x00); //environmental effects
int32_t count;
Item* ground = tile->getGround();
if (ground) {
msg.addItem(ground);
count = 1;
} else {
count = 0;
}
const TileItemVector* items = tile->getItemList();
if (items) {
for (auto it = items->getBeginTopItem(), end = items->getEndTopItem(); it != end; ++it) {
msg.addItem(*it);
count++;
if (count == 9 && tile->getPosition() == player->getPosition()) {
break;
} else if (count == 10) {
return;
}
}
}
const CreatureVector* creatures = tile->getCreatures();
if (creatures) {
bool playerAdded = false;
for (const Creature* creature : boost::adaptors::reverse(*creatures)) {
if (!player->canSeeCreature(creature)) {
continue;
}
if (tile->getPosition() == player->getPosition() && count == 9 && !playerAdded) {
creature = player;
}
if (creature->getID() == player->getID()) {
playerAdded = true;
}
bool known;
uint32_t removedKnown;
checkCreatureAsKnown(creature->getID(), known, removedKnown);
AddCreature(msg, creature, known, removedKnown);
if (++count == 10) {
return;
}
}
}
if (items) {
for (auto it = items->getBeginDownItem(), end = items->getEndDownItem(); it != end; ++it) {
msg.addItem(*it);
if (++count == 10) {
return;
}
}
}
}
void ProtocolGame::GetMapDescription(int32_t x, int32_t y, int32_t z, int32_t width, int32_t height, NetworkMessage& msg)
{
int32_t skip = -1;
int32_t startz, endz, zstep;
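	// above ground (z <= 7) the client sees floors 7 down to 0; underground it sees
	// two floors above and two floors below the requested floor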
if (z > 7) {
startz = z - 2;
endz = std::min<int32_t>(MAP_MAX_LAYERS - 1, z + 2);
zstep = 1;
} else {
startz = 7;
endz = 0;
zstep = -1;
}
for (int32_t nz = startz; nz != endz + zstep; nz += zstep) {
GetFloorDescription(msg, x, y, nz, width, height, z - nz, skip);
}
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
void ProtocolGame::GetFloorDescription(NetworkMessage& msg, int32_t x, int32_t y, int32_t z, int32_t width, int32_t height, int32_t offset, int32_t& skip)
{
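	// missing tiles are run-length encoded: 'skip' counts consecutive empty positions and is
	// flushed as (count, 0xFF) before the next real tile; a run is capped at 0xFF, and any
	// remainder is flushed by GetMapDescription after the last floor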
for (int32_t nx = 0; nx < width; nx++) {
for (int32_t ny = 0; ny < height; ny++) {
Tile* tile = g_game.map.getTile(x + nx + offset, y + ny + offset, z);
if (tile) {
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
skip = 0;
GetTileDescription(tile, msg);
} else if (skip == 0xFE) {
msg.addByte(0xFF);
msg.addByte(0xFF);
skip = -1;
} else {
++skip;
}
}
}
}
void ProtocolGame::checkCreatureAsKnown(uint32_t id, bool& known, uint32_t& removedKnown)
{
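	// the client keeps a cache of known creatures; if the id was already cached we only
	// report 'known', otherwise we may have to tell the client which cached creature to
	// forget (removedKnown) once the cache grows past its limit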
auto result = knownCreatureSet.insert(id);
if (!result.second) {
known = true;
return;
}
known = false;
if (knownCreatureSet.size() > 1300) {
// Look for a creature to remove
for (auto it = knownCreatureSet.begin(), end = knownCreatureSet.end(); it != end; ++it) {
Creature* creature = g_game.getCreatureByID(*it);
if (!canSee(creature)) {
removedKnown = *it;
knownCreatureSet.erase(it);
return;
}
}
		// Bad situation: every cached creature is still visible. Remove an arbitrary entry
		// (but never the id we just inserted).
auto it = knownCreatureSet.begin();
if (*it == id) {
++it;
}
removedKnown = *it;
knownCreatureSet.erase(it);
} else {
removedKnown = 0;
}
}
bool ProtocolGame::canSee(const Creature* c) const
{
if (!c || !player || c->isRemoved()) {
return false;
}
if (!player->canSeeCreature(c)) {
return false;
}
return canSee(c->getPosition());
}
bool ProtocolGame::canSee(const Position& pos) const
{
return canSee(pos.x, pos.y, pos.z);
}
bool ProtocolGame::canSee(int32_t x, int32_t y, int32_t z) const
{
if (!player) {
return false;
}
const Position& myPos = player->getPosition();
if (myPos.z <= 7) {
//we are on ground level or above (7 -> 0)
//view is from 7 -> 0
if (z > 7) {
return false;
}
} else if (myPos.z >= 8) {
//we are underground (8 -> 15)
//view is +/- 2 from the floor we stand on
if (std::abs(myPos.getZ() - z) > 2) {
return false;
}
}
	//a negative offset means that the action took place on a lower floor than the one we stand on
int32_t offsetz = myPos.getZ() - z;
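	// the visible area is an 18x14 tile rectangle around the player (8 left/9 right,
	// 6 up/7 down), shifted by offsetz to account for the perspective of other floors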
if ((x >= myPos.getX() - 8 + offsetz) && (x <= myPos.getX() + 9 + offsetz) &&
(y >= myPos.getY() - 6 + offsetz) && (y <= myPos.getY() + 7 + offsetz)) {
return true;
}
return false;
}
// Parse methods
void ProtocolGame::parseChannelInvite(NetworkMessage& msg)
{
const std::string name = msg.getString();
addGameTask(&Game::playerChannelInvite, player->getID(), name);
}
void ProtocolGame::parseChannelExclude(NetworkMessage& msg)
{
const std::string name = msg.getString();
addGameTask(&Game::playerChannelExclude, player->getID(), name);
}
void ProtocolGame::parseOpenChannel(NetworkMessage& msg)
{
uint16_t channelId = msg.get<uint16_t>();
addGameTask(&Game::playerOpenChannel, player->getID(), channelId);
}
void ProtocolGame::parseCloseChannel(NetworkMessage& msg)
{
uint16_t channelId = msg.get<uint16_t>();
addGameTask(&Game::playerCloseChannel, player->getID(), channelId);
}
void ProtocolGame::parseOpenPrivateChannel(NetworkMessage& msg)
{
const std::string receiver = msg.getString();
addGameTask(&Game::playerOpenPrivateChannel, player->getID(), receiver);
}
void ProtocolGame::parseAutoWalk(NetworkMessage& msg)
{
uint8_t numdirs = msg.getByte();
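	// the buffer position includes the 8-byte message header while getLength() does not,
	// so this verifies that exactly numdirs direction bytes remain; they are then read
	// back-to-front with getPreviousByte(), and push_front restores the original walking order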
if (numdirs == 0 || (msg.getBufferPosition() + numdirs) != (msg.getLength() + 8)) {
return;
}
msg.skipBytes(numdirs);
std::forward_list<Direction> path;
for (uint8_t i = 0; i < numdirs; ++i) {
uint8_t rawdir = msg.getPreviousByte();
switch (rawdir) {
case 1: path.push_front(DIRECTION_EAST); break;
case 2: path.push_front(DIRECTION_NORTHEAST); break;
case 3: path.push_front(DIRECTION_NORTH); break;
case 4: path.push_front(DIRECTION_NORTHWEST); break;
case 5: path.push_front(DIRECTION_WEST); break;
case 6: path.push_front(DIRECTION_SOUTHWEST); break;
case 7: path.push_front(DIRECTION_SOUTH); break;
case 8: path.push_front(DIRECTION_SOUTHEAST); break;
default: break;
}
}
if (path.empty()) {
return;
}
addGameTask(&Game::playerAutoWalk, player->getID(), path);
}
void ProtocolGame::parseSetOutfit(NetworkMessage& msg)
{
Outfit_t newOutfit;
newOutfit.lookType = msg.get<uint16_t>();
newOutfit.lookHead = msg.getByte();
newOutfit.lookBody = msg.getByte();
newOutfit.lookLegs = msg.getByte();
newOutfit.lookFeet = msg.getByte();
newOutfit.lookAddons = msg.getByte();
newOutfit.lookMount = msg.get<uint16_t>();
addGameTask(&Game::playerChangeOutfit, player->getID(), newOutfit);
}
void ProtocolGame::parseToggleMount(NetworkMessage& msg)
{
bool mount = msg.getByte() != 0;
addGameTask(&Game::playerToggleMount, player->getID(), mount);
}
void ProtocolGame::parseUseItem(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
uint8_t index = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerUseItem, player->getID(), pos, stackpos, index, spriteId);
}
void ProtocolGame::parseUseItemEx(NetworkMessage& msg)
{
Position fromPos = msg.getPosition();
uint16_t fromSpriteId = msg.get<uint16_t>();
uint8_t fromStackPos = msg.getByte();
Position toPos = msg.getPosition();
uint16_t toSpriteId = msg.get<uint16_t>();
uint8_t toStackPos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerUseItemEx, player->getID(), fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId);
}
void ProtocolGame::parseUseWithCreature(NetworkMessage& msg)
{
Position fromPos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t fromStackPos = msg.getByte();
uint32_t creatureId = msg.get<uint32_t>();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerUseWithCreature, player->getID(), fromPos, fromStackPos, creatureId, spriteId);
}
void ProtocolGame::parseCloseContainer(NetworkMessage& msg)
{
uint8_t cid = msg.getByte();
addGameTask(&Game::playerCloseContainer, player->getID(), cid);
}
void ProtocolGame::parseUpArrowContainer(NetworkMessage& msg)
{
uint8_t cid = msg.getByte();
addGameTask(&Game::playerMoveUpContainer, player->getID(), cid);
}
void ProtocolGame::parseUpdateContainer(NetworkMessage& msg)
{
uint8_t cid = msg.getByte();
addGameTask(&Game::playerUpdateContainer, player->getID(), cid);
}
void ProtocolGame::parseThrow(NetworkMessage& msg)
{
Position fromPos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t fromStackpos = msg.getByte();
Position toPos = msg.getPosition();
uint8_t count = msg.getByte();
if (toPos != fromPos) {
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerMoveThing, player->getID(), fromPos, spriteId, fromStackpos, toPos, count);
}
}
void ProtocolGame::parseLookAt(NetworkMessage& msg)
{
Position pos = msg.getPosition();
msg.skipBytes(2); // spriteId
uint8_t stackpos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookAt, player->getID(), pos, stackpos);
}
void ProtocolGame::parseLookInBattleList(NetworkMessage& msg)
{
uint32_t creatureId = msg.get<uint32_t>();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookInBattleList, player->getID(), creatureId);
}
void ProtocolGame::parseSay(NetworkMessage& msg)
{
std::string receiver;
uint16_t channelId;
SpeakClasses type = static_cast<SpeakClasses>(msg.getByte());
switch (type) {
case TALKTYPE_PRIVATE_TO:
case TALKTYPE_PRIVATE_RED_TO:
receiver = msg.getString();
channelId = 0;
break;
case TALKTYPE_CHANNEL_Y:
case TALKTYPE_CHANNEL_R1:
channelId = msg.get<uint16_t>();
break;
default:
channelId = 0;
break;
}
const std::string text = msg.getString();
if (text.length() > 255) {
return;
}
addGameTask(&Game::playerSay, player->getID(), channelId, type, receiver, text);
}
void ProtocolGame::parseFightModes(NetworkMessage& msg)
{
uint8_t rawFightMode = msg.getByte(); // 1 - offensive, 2 - balanced, 3 - defensive
	uint8_t rawChaseMode = msg.getByte(); // 0 - stand while fighting, 1 - chase opponent
uint8_t rawSecureMode = msg.getByte(); // 0 - can't attack unmarked, 1 - can attack unmarked
// uint8_t rawPvpMode = msg.getByte(); // pvp mode introduced in 10.0
fightMode_t fightMode;
if (rawFightMode == 1) {
fightMode = FIGHTMODE_ATTACK;
} else if (rawFightMode == 2) {
fightMode = FIGHTMODE_BALANCED;
} else {
fightMode = FIGHTMODE_DEFENSE;
}
addGameTask(&Game::playerSetFightModes, player->getID(), fightMode, rawChaseMode != 0, rawSecureMode != 0);
}
void ProtocolGame::parseAttack(NetworkMessage& msg)
{
uint32_t creatureId = msg.get<uint32_t>();
// msg.get<uint32_t>(); creatureId (same as above)
addGameTask(&Game::playerSetAttackedCreature, player->getID(), creatureId);
}
void ProtocolGame::parseFollow(NetworkMessage& msg)
{
uint32_t creatureId = msg.get<uint32_t>();
// msg.get<uint32_t>(); creatureId (same as above)
addGameTask(&Game::playerFollowCreature, player->getID(), creatureId);
}
void ProtocolGame::parseEquipObject(NetworkMessage& msg)
{
uint16_t spriteId = msg.get<uint16_t>();
// msg.get<uint8_t>();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerEquipItem, player->getID(), spriteId);
}
void ProtocolGame::parseTextWindow(NetworkMessage& msg)
{
uint32_t windowTextId = msg.get<uint32_t>();
const std::string newText = msg.getString();
addGameTask(&Game::playerWriteItem, player->getID(), windowTextId, newText);
}
void ProtocolGame::parseHouseWindow(NetworkMessage& msg)
{
uint8_t doorId = msg.getByte();
uint32_t id = msg.get<uint32_t>();
const std::string text = msg.getString();
addGameTask(&Game::playerUpdateHouseWindow, player->getID(), doorId, id, text);
}
void ProtocolGame::parseLookInShop(NetworkMessage& msg)
{
uint16_t id = msg.get<uint16_t>();
uint8_t count = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookInShop, player->getID(), id, count);
}
void ProtocolGame::parsePlayerPurchase(NetworkMessage& msg)
{
uint16_t id = msg.get<uint16_t>();
uint8_t count = msg.getByte();
uint8_t amount = msg.getByte();
bool ignoreCap = msg.getByte() != 0;
bool inBackpacks = msg.getByte() != 0;
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerPurchaseItem, player->getID(), id, count, amount, ignoreCap, inBackpacks);
}
void ProtocolGame::parsePlayerSale(NetworkMessage& msg)
{
uint16_t id = msg.get<uint16_t>();
uint8_t count = msg.getByte();
uint8_t amount = msg.getByte();
bool ignoreEquipped = msg.getByte() != 0;
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerSellItem, player->getID(), id, count, amount, ignoreEquipped);
}
void ProtocolGame::parseRequestTrade(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
uint32_t playerId = msg.get<uint32_t>();
addGameTask(&Game::playerRequestTrade, player->getID(), pos, stackpos, playerId, spriteId);
}
void ProtocolGame::parseLookInTrade(NetworkMessage& msg)
{
bool counterOffer = (msg.getByte() == 0x01);
uint8_t index = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerLookInTrade, player->getID(), counterOffer, index);
}
void ProtocolGame::parseAddVip(NetworkMessage& msg)
{
const std::string name = msg.getString();
addGameTask(&Game::playerRequestAddVip, player->getID(), name);
}
void ProtocolGame::parseRemoveVip(NetworkMessage& msg)
{
uint32_t guid = msg.get<uint32_t>();
addGameTask(&Game::playerRequestRemoveVip, player->getID(), guid);
}
void ProtocolGame::parseEditVip(NetworkMessage& msg)
{
uint32_t guid = msg.get<uint32_t>();
const std::string description = msg.getString();
uint32_t icon = std::min<uint32_t>(10, msg.get<uint32_t>()); // 10 is max icon in 9.63
bool notify = msg.getByte() != 0;
addGameTask(&Game::playerRequestEditVip, player->getID(), guid, description, icon, notify);
}
void ProtocolGame::parseRotateItem(NetworkMessage& msg)
{
Position pos = msg.getPosition();
uint16_t spriteId = msg.get<uint16_t>();
uint8_t stackpos = msg.getByte();
addGameTaskTimed(DISPATCHER_TASK_EXPIRATION, &Game::playerRotateItem, player->getID(), pos, stackpos, spriteId);
}
void ProtocolGame::parseRuleViolationReport(NetworkMessage& msg)
{
uint8_t reportType = msg.getByte();
uint8_t reportReason = msg.getByte();
const std::string& targetName = msg.getString();
const std::string& comment = msg.getString();
std::string translation;
if (reportType == REPORT_TYPE_NAME) {
translation = msg.getString();
} else if (reportType == REPORT_TYPE_STATEMENT) {
translation = msg.getString();
		msg.get<uint32_t>(); // statement id, used to look up whatever the player said; we don't log it
}
addGameTask(&Game::playerReportRuleViolation, player->getID(), targetName, reportType, reportReason, comment, translation);
}
void ProtocolGame::parseBugReport(NetworkMessage& msg)
{
uint8_t category = msg.getByte();
std::string message = msg.getString();
Position position;
if (category == BUG_CATEGORY_MAP) {
position = msg.getPosition();
}
addGameTask(&Game::playerReportBug, player->getID(), message, position, category);
}
void ProtocolGame::parseDebugAssert(NetworkMessage& msg)
{
if (debugAssertSent) {
return;
}
debugAssertSent = true;
std::string assertLine = msg.getString();
std::string date = msg.getString();
std::string description = msg.getString();
std::string comment = msg.getString();
addGameTask(&Game::playerDebugAssert, player->getID(), assertLine, date, description, comment);
}
void ProtocolGame::parseInviteToParty(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerInviteToParty, player->getID(), targetId);
}
void ProtocolGame::parseJoinParty(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerJoinParty, player->getID(), targetId);
}
void ProtocolGame::parseRevokePartyInvite(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerRevokePartyInvitation, player->getID(), targetId);
}
void ProtocolGame::parsePassPartyLeadership(NetworkMessage& msg)
{
uint32_t targetId = msg.get<uint32_t>();
addGameTask(&Game::playerPassPartyLeadership, player->getID(), targetId);
}
void ProtocolGame::parseEnableSharedPartyExperience(NetworkMessage& msg)
{
bool sharedExpActive = msg.getByte() == 1;
addGameTask(&Game::playerEnableSharedPartyExperience, player->getID(), sharedExpActive);
}
void ProtocolGame::parseQuestLine(NetworkMessage& msg)
{
uint16_t questId = msg.get<uint16_t>();
addGameTask(&Game::playerShowQuestLine, player->getID(), questId);
}
void ProtocolGame::parseMarketLeave()
{
addGameTask(&Game::playerLeaveMarket, player->getID());
}
void ProtocolGame::parseMarketBrowse(NetworkMessage& msg)
{
uint16_t browseId = msg.get<uint16_t>();
if (browseId == MARKETREQUEST_OWN_OFFERS) {
addGameTask(&Game::playerBrowseMarketOwnOffers, player->getID());
} else if (browseId == MARKETREQUEST_OWN_HISTORY) {
addGameTask(&Game::playerBrowseMarketOwnHistory, player->getID());
} else {
addGameTask(&Game::playerBrowseMarket, player->getID(), browseId);
}
}
void ProtocolGame::parseMarketCreateOffer(NetworkMessage& msg)
{
uint8_t type = msg.getByte();
uint16_t spriteId = msg.get<uint16_t>();
uint16_t amount = msg.get<uint16_t>();
uint32_t price = msg.get<uint32_t>();
bool anonymous = (msg.getByte() != 0);
addGameTask(&Game::playerCreateMarketOffer, player->getID(), type, spriteId, amount, price, anonymous);
}
void ProtocolGame::parseMarketCancelOffer(NetworkMessage& msg)
{
uint32_t timestamp = msg.get<uint32_t>();
uint16_t counter = msg.get<uint16_t>();
addGameTask(&Game::playerCancelMarketOffer, player->getID(), timestamp, counter);
}
void ProtocolGame::parseMarketAcceptOffer(NetworkMessage& msg)
{
uint32_t timestamp = msg.get<uint32_t>();
uint16_t counter = msg.get<uint16_t>();
uint16_t amount = msg.get<uint16_t>();
addGameTask(&Game::playerAcceptMarketOffer, player->getID(), timestamp, counter, amount);
}
void ProtocolGame::parseModalWindowAnswer(NetworkMessage& msg)
{
uint32_t id = msg.get<uint32_t>();
uint8_t button = msg.getByte();
uint8_t choice = msg.getByte();
addGameTask(&Game::playerAnswerModalWindow, player->getID(), id, button, choice);
}
void ProtocolGame::parseBrowseField(NetworkMessage& msg)
{
const Position& pos = msg.getPosition();
addGameTask(&Game::playerBrowseField, player->getID(), pos);
}
void ProtocolGame::parseSeekInContainer(NetworkMessage& msg)
{
uint8_t containerId = msg.getByte();
uint16_t index = msg.get<uint16_t>();
addGameTask(&Game::playerSeekInContainer, player->getID(), containerId, index);
}
// Send methods
void ProtocolGame::sendOpenPrivateChannel(const std::string& receiver)
{
NetworkMessage msg;
msg.addByte(0xAD);
msg.addString(receiver);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannelEvent(uint16_t channelId, const std::string& playerName, ChannelEvent_t channelEvent)
{
NetworkMessage msg;
msg.addByte(0xF3);
msg.add<uint16_t>(channelId);
msg.addString(playerName);
msg.addByte(channelEvent);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureOutfit(const Creature* creature, const Outfit_t& outfit)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x8E);
msg.add<uint32_t>(creature->getID());
AddOutfit(msg, outfit);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureLight(const Creature* creature)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
AddCreatureLight(msg, creature);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendWorldLight(LightInfo lightInfo)
{
NetworkMessage msg;
AddWorldLight(msg, lightInfo);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureWalkthrough(const Creature* creature, bool walkthrough)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x92);
msg.add<uint32_t>(creature->getID());
msg.addByte(walkthrough ? 0x00 : 0x01);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureShield(const Creature* creature)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x91);
msg.add<uint32_t>(creature->getID());
msg.addByte(player->getPartyShield(creature->getPlayer()));
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureSkull(const Creature* creature)
{
if (g_game.getWorldType() != WORLD_TYPE_PVP) {
return;
}
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x90);
msg.add<uint32_t>(creature->getID());
msg.addByte(player->getSkullClient(creature));
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureType(uint32_t creatureId, uint8_t creatureType)
{
NetworkMessage msg;
msg.addByte(0x95);
msg.add<uint32_t>(creatureId);
msg.addByte(creatureType);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureHelpers(uint32_t creatureId, uint16_t helpers)
{
NetworkMessage msg;
msg.addByte(0x94);
msg.add<uint32_t>(creatureId);
msg.add<uint16_t>(helpers);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureSquare(const Creature* creature, SquareColor_t color)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x93);
msg.add<uint32_t>(creature->getID());
msg.addByte(0x01);
msg.addByte(color);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTutorial(uint8_t tutorialId)
{
NetworkMessage msg;
msg.addByte(0xDC);
msg.addByte(tutorialId);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddMarker(const Position& pos, uint8_t markType, const std::string& desc)
{
NetworkMessage msg;
msg.addByte(0xDD);
msg.addPosition(pos);
msg.addByte(markType);
msg.addString(desc);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendReLoginWindow(uint8_t unfairFightReduction)
{
NetworkMessage msg;
msg.addByte(0x28);
msg.addByte(0x00);
msg.addByte(unfairFightReduction);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendStats()
{
NetworkMessage msg;
AddPlayerStats(msg);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendBasicData()
{
NetworkMessage msg;
msg.addByte(0x9F);
if (player->isPremium()) {
msg.addByte(1);
msg.add<uint32_t>(time(nullptr) + (player->premiumDays * 86400));
} else {
msg.addByte(0);
msg.add<uint32_t>(0);
}
msg.addByte(player->getVocation()->getClientId());
msg.add<uint16_t>(0xFF); // number of known spells
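	// every spell id from 0x00 to 0xFE is reported as known, matching the 0xFF count above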
for (uint8_t spellId = 0x00; spellId < 0xFF; spellId++) {
msg.addByte(spellId);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTextMessage(const TextMessage& message)
{
NetworkMessage msg;
msg.addByte(0xB4);
msg.addByte(message.type);
switch (message.type) {
case MESSAGE_DAMAGE_DEALT:
case MESSAGE_DAMAGE_RECEIVED:
case MESSAGE_DAMAGE_OTHERS: {
msg.addPosition(message.position);
msg.add<uint32_t>(message.primary.value);
msg.addByte(message.primary.color);
msg.add<uint32_t>(message.secondary.value);
msg.addByte(message.secondary.color);
break;
}
case MESSAGE_HEALED:
case MESSAGE_HEALED_OTHERS:
case MESSAGE_EXPERIENCE:
case MESSAGE_EXPERIENCE_OTHERS: {
msg.addPosition(message.position);
msg.add<uint32_t>(message.primary.value);
msg.addByte(message.primary.color);
break;
}
case MESSAGE_GUILD:
case MESSAGE_PARTY_MANAGEMENT:
case MESSAGE_PARTY:
msg.add<uint16_t>(message.channelId);
break;
default: {
break;
}
}
msg.addString(message.text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendClosePrivate(uint16_t channelId)
{
NetworkMessage msg;
msg.addByte(0xB3);
msg.add<uint16_t>(channelId);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatePrivateChannel(uint16_t channelId, const std::string& channelName)
{
NetworkMessage msg;
msg.addByte(0xB2);
msg.add<uint16_t>(channelId);
msg.addString(channelName);
msg.add<uint16_t>(0x01);
msg.addString(player->getName());
msg.add<uint16_t>(0x00);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannelsDialog()
{
NetworkMessage msg;
msg.addByte(0xAB);
const ChannelList& list = g_chat->getChannelList(*player);
msg.addByte(list.size());
for (ChatChannel* channel : list) {
msg.add<uint16_t>(channel->getId());
msg.addString(channel->getName());
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannel(uint16_t channelId, const std::string& channelName, const UsersMap* channelUsers, const InvitedMap* invitedUsers)
{
NetworkMessage msg;
msg.addByte(0xAC);
msg.add<uint16_t>(channelId);
msg.addString(channelName);
if (channelUsers) {
msg.add<uint16_t>(channelUsers->size());
for (const auto& it : *channelUsers) {
msg.addString(it.second->getName());
}
} else {
msg.add<uint16_t>(0x00);
}
if (invitedUsers) {
msg.add<uint16_t>(invitedUsers->size());
for (const auto& it : *invitedUsers) {
msg.addString(it.second->getName());
}
} else {
msg.add<uint16_t>(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChannelMessage(const std::string& author, const std::string& text, SpeakClasses type, uint16_t channel)
{
NetworkMessage msg;
msg.addByte(0xAA);
msg.add<uint32_t>(0x00);
msg.addString(author);
msg.add<uint16_t>(0x00);
msg.addByte(type);
msg.add<uint16_t>(channel);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendIcons(uint16_t icons)
{
NetworkMessage msg;
msg.addByte(0xA2);
msg.add<uint16_t>(icons);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendContainer(uint8_t cid, const Container* container, bool hasParent, uint16_t firstIndex)
{
NetworkMessage msg;
msg.addByte(0x6E);
msg.addByte(cid);
if (container->getID() == ITEM_BROWSEFIELD) {
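		// browse field containers have no backing item; show placeholder item id 1987
		// (a plain container sprite) with a fixed name instead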
msg.addItem(1987, 1);
msg.addString("Browse Field");
} else {
msg.addItem(container);
msg.addString(container->getName());
}
msg.addByte(container->capacity());
msg.addByte(hasParent ? 0x01 : 0x00);
msg.addByte(container->isUnlocked() ? 0x01 : 0x00); // Drag and drop
msg.addByte(container->hasPagination() ? 0x01 : 0x00); // Pagination
uint32_t containerSize = container->size();
msg.add<uint16_t>(containerSize);
msg.add<uint16_t>(firstIndex);
if (firstIndex < containerSize) {
uint8_t itemsToSend = std::min<uint32_t>(std::min<uint32_t>(container->capacity(), containerSize - firstIndex), std::numeric_limits<uint8_t>::max());
msg.addByte(itemsToSend);
for (auto it = container->getItemList().begin() + firstIndex, end = it + itemsToSend; it != end; ++it) {
msg.addItem(*it);
}
} else {
msg.addByte(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendShop(Npc* npc, const ShopInfoList& itemList)
{
NetworkMessage msg;
msg.addByte(0x7A);
msg.addString(npc->getName());
uint16_t itemsToSend = std::min<size_t>(itemList.size(), std::numeric_limits<uint16_t>::max());
msg.add<uint16_t>(itemsToSend);
uint16_t i = 0;
for (auto it = itemList.begin(); i < itemsToSend; ++it, ++i) {
AddShopItem(msg, *it);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCloseShop()
{
NetworkMessage msg;
msg.addByte(0x7C);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendSaleItemList(const std::list<ShopInfo>& shop)
{
NetworkMessage msg;
msg.addByte(0x7B);
msg.add<uint64_t>(player->getMoney());
std::map<uint16_t, uint32_t> saleMap;
if (shop.size() <= 5) {
// For very small shops it's not worth it to create the complete map
for (const ShopInfo& shopInfo : shop) {
if (shopInfo.sellPrice == 0) {
continue;
}
int8_t subtype = -1;
const ItemType& itemType = Item::items[shopInfo.itemId];
if (itemType.hasSubType() && !itemType.stackable) {
subtype = (shopInfo.subType == 0 ? -1 : shopInfo.subType);
}
uint32_t count = player->getItemTypeCount(shopInfo.itemId, subtype);
if (count > 0) {
saleMap[shopInfo.itemId] = count;
}
}
} else {
// Large shop, it's better to get a cached map of all item counts and use it
// We need a temporary map since the finished map should only contain items
// available in the shop
std::map<uint32_t, uint32_t> tempSaleMap;
player->getAllItemTypeCount(tempSaleMap);
		// We must still check manually for the special items that require subtype matches
		// (that is, fluid containers and splashes; only a few shop items need this since
		// health potions now use their own item IDs)
for (const ShopInfo& shopInfo : shop) {
if (shopInfo.sellPrice == 0) {
continue;
}
int8_t subtype = -1;
const ItemType& itemType = Item::items[shopInfo.itemId];
if (itemType.hasSubType() && !itemType.stackable) {
subtype = (shopInfo.subType == 0 ? -1 : shopInfo.subType);
}
if (subtype != -1) {
uint32_t count;
if (!itemType.isFluidContainer() && !itemType.isSplash()) {
count = player->getItemTypeCount(shopInfo.itemId, subtype); // This shop item requires extra checks
} else {
count = subtype;
}
if (count > 0) {
saleMap[shopInfo.itemId] = count;
}
} else {
std::map<uint32_t, uint32_t>::const_iterator findIt = tempSaleMap.find(shopInfo.itemId);
if (findIt != tempSaleMap.end() && findIt->second > 0) {
saleMap[shopInfo.itemId] = findIt->second;
}
}
}
}
uint8_t itemsToSend = std::min<size_t>(saleMap.size(), std::numeric_limits<uint8_t>::max());
msg.addByte(itemsToSend);
uint8_t i = 0;
for (std::map<uint16_t, uint32_t>::const_iterator it = saleMap.begin(); i < itemsToSend; ++it, ++i) {
msg.addItemId(it->first);
msg.addByte(std::min<uint32_t>(it->second, std::numeric_limits<uint8_t>::max()));
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketEnter(uint32_t depotId)
{
NetworkMessage msg;
msg.addByte(0xF6);
msg.add<uint64_t>(player->getBankBalance());
msg.addByte(std::min<uint32_t>(IOMarket::getPlayerOfferCount(player->getGUID()), std::numeric_limits<uint8_t>::max()));
DepotChest* depotChest = player->getDepotChest(depotId, false);
if (!depotChest) {
msg.add<uint16_t>(0x00);
writeToOutputBuffer(msg);
return;
}
player->setInMarket(true);
std::map<uint16_t, uint32_t> depotItems;
std::forward_list<Container*> containerList { depotChest, player->getInbox() };
do {
Container* container = containerList.front();
containerList.pop_front();
for (Item* item : container->getItemList()) {
Container* c = item->getContainer();
if (c && !c->empty()) {
containerList.push_front(c);
continue;
}
const ItemType& itemType = Item::items[item->getID()];
if (itemType.wareId == 0) {
continue;
}
if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) {
continue;
}
if (!item->hasMarketAttributes()) {
continue;
}
depotItems[itemType.wareId] += Item::countByType(item, -1);
}
} while (!containerList.empty());
uint16_t itemsToSend = std::min<size_t>(depotItems.size(), std::numeric_limits<uint16_t>::max());
msg.add<uint16_t>(itemsToSend);
uint16_t i = 0;
for (std::map<uint16_t, uint32_t>::const_iterator it = depotItems.begin(); i < itemsToSend; ++it, ++i) {
msg.add<uint16_t>(it->first);
msg.add<uint16_t>(std::min<uint32_t>(0xFFFF, it->second));
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketLeave()
{
NetworkMessage msg;
msg.addByte(0xF7);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketBrowseItem(uint16_t itemId, const MarketOfferList& buyOffers, const MarketOfferList& sellOffers)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.addItemId(itemId);
msg.add<uint32_t>(buyOffers.size());
for (const MarketOffer& offer : buyOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
}
msg.add<uint32_t>(sellOffers.size());
for (const MarketOffer& offer : sellOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketAcceptOffer(const MarketOfferEx& offer)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.addItemId(offer.itemId);
if (offer.type == MARKETACTION_BUY) {
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
msg.add<uint32_t>(0x00);
} else {
msg.add<uint32_t>(0x00);
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.addString(offer.playerName);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketBrowseOwnOffers(const MarketOfferList& buyOffers, const MarketOfferList& sellOffers)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.add<uint16_t>(MARKETREQUEST_OWN_OFFERS);
msg.add<uint32_t>(buyOffers.size());
for (const MarketOffer& offer : buyOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
}
msg.add<uint32_t>(sellOffers.size());
for (const MarketOffer& offer : sellOffers) {
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketCancelOffer(const MarketOfferEx& offer)
{
NetworkMessage msg;
msg.addByte(0xF9);
msg.add<uint16_t>(MARKETREQUEST_OWN_OFFERS);
if (offer.type == MARKETACTION_BUY) {
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
msg.add<uint32_t>(0x00);
} else {
msg.add<uint32_t>(0x00);
msg.add<uint32_t>(0x01);
msg.add<uint32_t>(offer.timestamp);
msg.add<uint16_t>(offer.counter);
msg.addItemId(offer.itemId);
msg.add<uint16_t>(offer.amount);
msg.add<uint32_t>(offer.price);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketBrowseOwnHistory(const HistoryMarketOfferList& buyOffers, const HistoryMarketOfferList& sellOffers)
{
uint32_t i = 0;
std::map<uint32_t, uint16_t> counterMap;
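	// the client's history view only handles a limited number of entries: each list gets
	// up to 810 offers, and capacity left unused by one list can be borrowed by the other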
uint32_t buyOffersToSend = std::min<uint32_t>(buyOffers.size(), 810 + std::max<int32_t>(0, 810 - sellOffers.size()));
uint32_t sellOffersToSend = std::min<uint32_t>(sellOffers.size(), 810 + std::max<int32_t>(0, 810 - buyOffers.size()));
NetworkMessage msg;
msg.addByte(0xF9);
msg.add<uint16_t>(MARKETREQUEST_OWN_HISTORY);
msg.add<uint32_t>(buyOffersToSend);
for (auto it = buyOffers.begin(); i < buyOffersToSend; ++it, ++i) {
msg.add<uint32_t>(it->timestamp);
msg.add<uint16_t>(counterMap[it->timestamp]++);
msg.addItemId(it->itemId);
msg.add<uint16_t>(it->amount);
msg.add<uint32_t>(it->price);
msg.addByte(it->state);
}
counterMap.clear();
i = 0;
msg.add<uint32_t>(sellOffersToSend);
for (auto it = sellOffers.begin(); i < sellOffersToSend; ++it, ++i) {
msg.add<uint32_t>(it->timestamp);
msg.add<uint16_t>(counterMap[it->timestamp]++);
msg.addItemId(it->itemId);
msg.add<uint16_t>(it->amount);
msg.add<uint32_t>(it->price);
msg.addByte(it->state);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMarketDetail(uint16_t itemId)
{
NetworkMessage msg;
msg.addByte(0xF8);
msg.addItemId(itemId);
const ItemType& it = Item::items[itemId];
if (it.armor != 0) {
msg.addString(std::to_string(it.armor));
} else {
msg.add<uint16_t>(0x00);
}
if (it.attack != 0) {
// TODO: chance to hit, range
// example:
// "attack +x, chance to hit +y%, z fields"
if (it.abilities && it.abilities->elementType != COMBAT_NONE && it.abilities->elementDamage != 0) {
std::ostringstream ss;
ss << it.attack << " physical +" << it.abilities->elementDamage << ' ' << getCombatName(it.abilities->elementType);
msg.addString(ss.str());
} else {
msg.addString(std::to_string(it.attack));
}
} else {
msg.add<uint16_t>(0x00);
}
if (it.isContainer()) {
msg.addString(std::to_string(it.maxItems));
} else {
msg.add<uint16_t>(0x00);
}
if (it.defense != 0) {
if (it.extraDefense != 0) {
std::ostringstream ss;
ss << it.defense << ' ' << std::showpos << it.extraDefense << std::noshowpos;
msg.addString(ss.str());
} else {
msg.addString(std::to_string(it.defense));
}
} else {
msg.add<uint16_t>(0x00);
}
if (!it.description.empty()) {
const std::string& descr = it.description;
if (descr.back() == '.') {
msg.addString(std::string(descr, 0, descr.length() - 1));
} else {
msg.addString(descr);
}
} else {
msg.add<uint16_t>(0x00);
}
if (it.decayTime != 0) {
std::ostringstream ss;
ss << it.decayTime << " seconds";
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
if (it.abilities) {
std::ostringstream ss;
bool separator = false;
for (size_t i = 0; i < COMBAT_COUNT; ++i) {
if (it.abilities->absorbPercent[i] == 0) {
continue;
}
if (separator) {
ss << ", ";
} else {
separator = true;
}
ss << getCombatName(indexToCombatType(i)) << ' ' << std::showpos << it.abilities->absorbPercent[i] << std::noshowpos << '%';
}
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
if (it.minReqLevel != 0) {
msg.addString(std::to_string(it.minReqLevel));
} else {
msg.add<uint16_t>(0x00);
}
if (it.minReqMagicLevel != 0) {
msg.addString(std::to_string(it.minReqMagicLevel));
} else {
msg.add<uint16_t>(0x00);
}
msg.addString(it.vocationString);
msg.addString(it.runeSpellName);
if (it.abilities) {
std::ostringstream ss;
bool separator = false;
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; i++) {
if (!it.abilities->skills[i]) {
continue;
}
if (separator) {
ss << ", ";
} else {
separator = true;
}
ss << getSkillName(i) << ' ' << std::showpos << it.abilities->skills[i] << std::noshowpos;
}
if (it.abilities->stats[STAT_MAGICPOINTS] != 0) {
if (separator) {
ss << ", ";
} else {
separator = true;
}
ss << "magic level " << std::showpos << it.abilities->stats[STAT_MAGICPOINTS] << std::noshowpos;
}
if (it.abilities->speed != 0) {
if (separator) {
ss << ", ";
}
ss << "speed " << std::showpos << (it.abilities->speed >> 1) << std::noshowpos;
}
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
if (it.charges != 0) {
msg.addString(std::to_string(it.charges));
} else {
msg.add<uint16_t>(0x00);
}
std::string weaponName = getWeaponName(it.weaponType);
if (it.slotPosition & SLOTP_TWO_HAND) {
if (!weaponName.empty()) {
weaponName += ", two-handed";
} else {
weaponName = "two-handed";
}
}
msg.addString(weaponName);
if (it.weight != 0) {
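		// weight is stored in hundredths of an ounce; format it with two decimal places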
std::ostringstream ss;
if (it.weight < 10) {
ss << "0.0" << it.weight;
} else if (it.weight < 100) {
ss << "0." << it.weight;
} else {
std::string weightString = std::to_string(it.weight);
weightString.insert(weightString.end() - 2, '.');
ss << weightString;
}
ss << " oz";
msg.addString(ss.str());
} else {
msg.add<uint16_t>(0x00);
}
MarketStatistics* statistics = IOMarket::getInstance().getPurchaseStatistics(itemId);
if (statistics) {
msg.addByte(0x01);
msg.add<uint32_t>(statistics->numTransactions);
msg.add<uint32_t>(std::min<uint64_t>(std::numeric_limits<uint32_t>::max(), statistics->totalPrice));
msg.add<uint32_t>(statistics->highestPrice);
msg.add<uint32_t>(statistics->lowestPrice);
} else {
msg.addByte(0x00);
}
statistics = IOMarket::getInstance().getSaleStatistics(itemId);
if (statistics) {
msg.addByte(0x01);
msg.add<uint32_t>(statistics->numTransactions);
msg.add<uint32_t>(std::min<uint64_t>(std::numeric_limits<uint32_t>::max(), statistics->totalPrice));
msg.add<uint32_t>(statistics->highestPrice);
msg.add<uint32_t>(statistics->lowestPrice);
} else {
msg.addByte(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendQuestLog()
{
NetworkMessage msg;
msg.addByte(0xF0);
msg.add<uint16_t>(g_game.quests.getQuestsCount(player));
for (const Quest& quest : g_game.quests.getQuests()) {
if (quest.isStarted(player)) {
msg.add<uint16_t>(quest.getID());
msg.addString(quest.getName());
msg.addByte(quest.isCompleted(player));
}
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendQuestLine(const Quest* quest)
{
NetworkMessage msg;
msg.addByte(0xF1);
msg.add<uint16_t>(quest->getID());
msg.addByte(quest->getMissionsCount(player));
for (const Mission& mission : quest->getMissions()) {
if (mission.isStarted(player)) {
msg.addString(mission.getName(player));
msg.addString(mission.getDescription(player));
}
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTradeItemRequest(const std::string& traderName, const Item* item, bool ack)
{
NetworkMessage msg;
if (ack) {
msg.addByte(0x7D);
} else {
msg.addByte(0x7E);
}
msg.addString(traderName);
if (const Container* tradeContainer = item->getContainer()) {
std::list<const Container*> listContainer {tradeContainer};
std::list<const Item*> itemList {tradeContainer};
while (!listContainer.empty()) {
const Container* container = listContainer.front();
listContainer.pop_front();
for (Item* containerItem : container->getItemList()) {
Container* tmpContainer = containerItem->getContainer();
if (tmpContainer) {
listContainer.push_back(tmpContainer);
}
itemList.push_back(containerItem);
}
}
msg.addByte(itemList.size());
for (const Item* listItem : itemList) {
msg.addItem(listItem);
}
} else {
msg.addByte(0x01);
msg.addItem(item);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCloseTrade()
{
NetworkMessage msg;
msg.addByte(0x7F);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCloseContainer(uint8_t cid)
{
NetworkMessage msg;
msg.addByte(0x6F);
msg.addByte(cid);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureTurn(const Creature* creature, uint32_t stackPos)
{
if (!canSee(creature)) {
return;
}
NetworkMessage msg;
msg.addByte(0x6B);
msg.addPosition(creature->getPosition());
msg.addByte(stackPos);
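	// 0x63 marks an update of an already known creature: only the id, direction and
	// walk-through flag follow, not a full creature description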
msg.add<uint16_t>(0x63);
msg.add<uint32_t>(creature->getID());
msg.addByte(creature->getDirection());
msg.addByte(player->canWalkthroughEx(creature) ? 0x00 : 0x01);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureSay(const Creature* creature, SpeakClasses type, const std::string& text, const Position* pos/* = nullptr*/)
{
NetworkMessage msg;
msg.addByte(0xAA);
static uint32_t statementId = 0;
msg.add<uint32_t>(++statementId);
msg.addString(creature->getName());
//Add level only for players
if (const Player* speaker = creature->getPlayer()) {
msg.add<uint16_t>(speaker->getLevel());
} else {
msg.add<uint16_t>(0x00);
}
msg.addByte(type);
if (pos) {
msg.addPosition(*pos);
} else {
msg.addPosition(creature->getPosition());
}
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendToChannel(const Creature* creature, SpeakClasses type, const std::string& text, uint16_t channelId)
{
NetworkMessage msg;
msg.addByte(0xAA);
static uint32_t statementId = 0;
msg.add<uint32_t>(++statementId);
if (!creature) {
msg.add<uint32_t>(0x00);
} else if (type == TALKTYPE_CHANNEL_R2) {
msg.add<uint32_t>(0x00);
type = TALKTYPE_CHANNEL_R1;
} else {
msg.addString(creature->getName());
//Add level only for players
if (const Player* speaker = creature->getPlayer()) {
msg.add<uint16_t>(speaker->getLevel());
} else {
msg.add<uint16_t>(0x00);
}
}
msg.addByte(type);
msg.add<uint16_t>(channelId);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPrivateMessage(const Player* speaker, SpeakClasses type, const std::string& text)
{
NetworkMessage msg;
msg.addByte(0xAA);
static uint32_t statementId = 0;
msg.add<uint32_t>(++statementId);
if (speaker) {
msg.addString(speaker->getName());
msg.add<uint16_t>(speaker->getLevel());
} else {
msg.add<uint32_t>(0x00);
}
msg.addByte(type);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCancelTarget()
{
NetworkMessage msg;
msg.addByte(0xA3);
msg.add<uint32_t>(0x00);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendChangeSpeed(const Creature* creature, uint32_t speed)
{
NetworkMessage msg;
msg.addByte(0x8F);
msg.add<uint32_t>(creature->getID());
msg.add<uint16_t>(creature->getBaseSpeed() / 2);
msg.add<uint16_t>(speed / 2);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCancelWalk()
{
NetworkMessage msg;
msg.addByte(0xB5);
msg.addByte(player->getDirection());
writeToOutputBuffer(msg);
}
void ProtocolGame::sendSkills()
{
NetworkMessage msg;
AddPlayerSkills(msg);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPing()
{
NetworkMessage msg;
msg.addByte(0x1D);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPingBack()
{
NetworkMessage msg;
msg.addByte(0x1E);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendDistanceShoot(const Position& from, const Position& to, uint8_t type)
{
NetworkMessage msg;
msg.addByte(0x85);
msg.addPosition(from);
msg.addPosition(to);
msg.addByte(type);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendMagicEffect(const Position& pos, uint8_t type)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x83);
msg.addPosition(pos);
msg.addByte(type);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendCreatureHealth(const Creature* creature)
{
NetworkMessage msg;
msg.addByte(0x8C);
msg.add<uint32_t>(creature->getID());
if (creature->isHealthHidden()) {
msg.addByte(0x00);
} else {
msg.addByte(std::ceil((static_cast<double>(creature->getHealth()) / std::max<int32_t>(creature->getMaxHealth(), 1)) * 100));
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendFYIBox(const std::string& message)
{
NetworkMessage msg;
msg.addByte(0x15);
msg.addString(message);
writeToOutputBuffer(msg);
}
//tile
void ProtocolGame::sendMapDescription(const Position& pos)
{
NetworkMessage msg;
msg.addByte(0x64);
msg.addPosition(player->getPosition());
GetMapDescription(pos.x - 8, pos.y - 6, pos.z, 18, 14, msg);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddTileItem(const Position& pos, uint32_t stackpos, const Item* item)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x6A);
msg.addPosition(pos);
msg.addByte(stackpos);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdateTileItem(const Position& pos, uint32_t stackpos, const Item* item)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x6B);
msg.addPosition(pos);
msg.addByte(stackpos);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendRemoveTileThing(const Position& pos, uint32_t stackpos)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
RemoveTileThing(msg, pos, stackpos);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdateTile(const Tile* tile, const Position& pos)
{
if (!canSee(pos)) {
return;
}
NetworkMessage msg;
msg.addByte(0x69);
msg.addPosition(pos);
if (tile) {
GetTileDescription(tile, msg);
msg.addByte(0x00);
msg.addByte(0xFF);
} else {
msg.addByte(0x01);
msg.addByte(0xFF);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendPendingStateEntered()
{
NetworkMessage msg;
msg.addByte(0x0A);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendEnterWorld()
{
NetworkMessage msg;
msg.addByte(0x0F);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendFightModes()
{
NetworkMessage msg;
msg.addByte(0xA7);
msg.addByte(player->fightMode);
msg.addByte(player->chaseMode);
msg.addByte(player->secureMode);
msg.addByte(PVP_MODE_DOVE);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddCreature(const Creature* creature, const Position& pos, int32_t stackpos, bool isLogin)
{
if (!canSee(pos)) {
return;
}
if (creature != player) {
if (stackpos != -1) {
NetworkMessage msg;
msg.addByte(0x6A);
msg.addPosition(pos);
msg.addByte(stackpos);
bool known;
uint32_t removedKnown;
checkCreatureAsKnown(creature->getID(), known, removedKnown);
AddCreature(msg, creature, known, removedKnown);
writeToOutputBuffer(msg);
}
if (isLogin) {
sendMagicEffect(pos, CONST_ME_TELEPORT);
}
return;
}
NetworkMessage msg;
msg.addByte(0x17);
msg.add<uint32_t>(player->getID());
msg.add<uint16_t>(0x32); // beat duration (50)
msg.addDouble(Creature::speedA, 3);
msg.addDouble(Creature::speedB, 3);
msg.addDouble(Creature::speedC, 3);
// can report bugs?
if (player->getAccountType() >= ACCOUNT_TYPE_TUTOR) {
msg.addByte(0x01);
} else {
msg.addByte(0x00);
}
msg.addByte(0x00); // can change pvp framing option
msg.addByte(0x00); // expert mode button enabled
msg.add<uint16_t>(0x00); // URL (string) to ingame store images
msg.add<uint16_t>(25); // premium coin package size
writeToOutputBuffer(msg);
sendPendingStateEntered();
sendEnterWorld();
sendMapDescription(pos);
if (isLogin) {
sendMagicEffect(pos, CONST_ME_TELEPORT);
}
sendInventoryItem(CONST_SLOT_HEAD, player->getInventoryItem(CONST_SLOT_HEAD));
sendInventoryItem(CONST_SLOT_NECKLACE, player->getInventoryItem(CONST_SLOT_NECKLACE));
sendInventoryItem(CONST_SLOT_BACKPACK, player->getInventoryItem(CONST_SLOT_BACKPACK));
sendInventoryItem(CONST_SLOT_ARMOR, player->getInventoryItem(CONST_SLOT_ARMOR));
sendInventoryItem(CONST_SLOT_RIGHT, player->getInventoryItem(CONST_SLOT_RIGHT));
sendInventoryItem(CONST_SLOT_LEFT, player->getInventoryItem(CONST_SLOT_LEFT));
sendInventoryItem(CONST_SLOT_LEGS, player->getInventoryItem(CONST_SLOT_LEGS));
sendInventoryItem(CONST_SLOT_FEET, player->getInventoryItem(CONST_SLOT_FEET));
sendInventoryItem(CONST_SLOT_RING, player->getInventoryItem(CONST_SLOT_RING));
sendInventoryItem(CONST_SLOT_AMMO, player->getInventoryItem(CONST_SLOT_AMMO));
sendStats();
sendSkills();
//gameworld light-settings
sendWorldLight(g_game.getWorldLightInfo());
//player light level
sendCreatureLight(creature);
sendVIPEntries();
sendBasicData();
player->sendIcons();
}
void ProtocolGame::sendMoveCreature(const Creature* creature, const Position& newPos, int32_t newStackPos, const Position& oldPos, int32_t oldStackPos, bool teleport)
{
if (creature == player) {
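		// stack positions >= 10 cannot be addressed by the client (it tracks at most
		// 10 things per tile), so fall back to resending the whole map description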
if (oldStackPos >= 10) {
sendMapDescription(newPos);
} else if (teleport) {
NetworkMessage msg;
RemoveTileThing(msg, oldPos, oldStackPos);
writeToOutputBuffer(msg);
sendMapDescription(newPos);
} else {
NetworkMessage msg;
if (oldPos.z == 7 && newPos.z >= 8) {
RemoveTileThing(msg, oldPos, oldStackPos);
} else {
msg.addByte(0x6D);
msg.addPosition(oldPos);
msg.addByte(oldStackPos);
msg.addPosition(newPos);
}
if (newPos.z > oldPos.z) {
MoveDownCreature(msg, creature, newPos, oldPos);
} else if (newPos.z < oldPos.z) {
MoveUpCreature(msg, creature, newPos, oldPos);
}
if (oldPos.y > newPos.y) { // north, for old x
msg.addByte(0x65);
GetMapDescription(oldPos.x - 8, newPos.y - 6, newPos.z, 18, 1, msg);
} else if (oldPos.y < newPos.y) { // south, for old x
msg.addByte(0x67);
GetMapDescription(oldPos.x - 8, newPos.y + 7, newPos.z, 18, 1, msg);
}
if (oldPos.x < newPos.x) { // east, [with new y]
msg.addByte(0x66);
GetMapDescription(newPos.x + 9, newPos.y - 6, newPos.z, 1, 14, msg);
} else if (oldPos.x > newPos.x) { // west, [with new y]
msg.addByte(0x68);
GetMapDescription(newPos.x - 8, newPos.y - 6, newPos.z, 1, 14, msg);
}
writeToOutputBuffer(msg);
}
} else if (canSee(oldPos) && canSee(creature->getPosition())) {
if (teleport || (oldPos.z == 7 && newPos.z >= 8) || oldStackPos >= 10) {
sendRemoveTileThing(oldPos, oldStackPos);
sendAddCreature(creature, newPos, newStackPos, false);
} else {
NetworkMessage msg;
msg.addByte(0x6D);
msg.addPosition(oldPos);
msg.addByte(oldStackPos);
msg.addPosition(creature->getPosition());
writeToOutputBuffer(msg);
}
} else if (canSee(oldPos)) {
sendRemoveTileThing(oldPos, oldStackPos);
} else if (canSee(creature->getPosition())) {
sendAddCreature(creature, newPos, newStackPos, false);
}
}
void ProtocolGame::sendInventoryItem(slots_t slot, const Item* item)
{
NetworkMessage msg;
if (item) {
msg.addByte(0x78);
msg.addByte(slot);
msg.addItem(item);
} else {
msg.addByte(0x79);
msg.addByte(slot);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendItems()
{
NetworkMessage msg;
msg.addByte(0xF5);
const std::vector<uint16_t>& inventory = Item::items.getInventory();
msg.add<uint16_t>(inventory.size() + 11);
for (uint16_t i = 1; i <= 11; i++) {
msg.add<uint16_t>(i);
msg.addByte(0); //always 0
msg.add<uint16_t>(1); // always 1
}
for (auto clientId : inventory) {
msg.add<uint16_t>(clientId);
msg.addByte(0); //always 0
msg.add<uint16_t>(1);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendAddContainerItem(uint8_t cid, uint16_t slot, const Item* item)
{
NetworkMessage msg;
msg.addByte(0x70);
msg.addByte(cid);
msg.add<uint16_t>(slot);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdateContainerItem(uint8_t cid, uint16_t slot, const Item* item)
{
NetworkMessage msg;
msg.addByte(0x71);
msg.addByte(cid);
msg.add<uint16_t>(slot);
msg.addItem(item);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendRemoveContainerItem(uint8_t cid, uint16_t slot, const Item* lastItem)
{
NetworkMessage msg;
msg.addByte(0x72);
msg.addByte(cid);
msg.add<uint16_t>(slot);
if (lastItem) {
msg.addItem(lastItem);
} else {
msg.add<uint16_t>(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTextWindow(uint32_t windowTextId, Item* item, uint16_t maxlen, bool canWrite)
{
NetworkMessage msg;
msg.addByte(0x96);
msg.add<uint32_t>(windowTextId);
msg.addItem(item);
if (canWrite) {
msg.add<uint16_t>(maxlen);
msg.addString(item->getText());
} else {
const std::string& text = item->getText();
msg.add<uint16_t>(text.size());
msg.addString(text);
}
const std::string& writer = item->getWriter();
if (!writer.empty()) {
msg.addString(writer);
} else {
msg.add<uint16_t>(0x00);
}
time_t writtenDate = item->getDate();
if (writtenDate != 0) {
msg.addString(formatDateShort(writtenDate));
} else {
msg.add<uint16_t>(0x00);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendTextWindow(uint32_t windowTextId, uint32_t itemId, const std::string& text)
{
NetworkMessage msg;
msg.addByte(0x96);
msg.add<uint32_t>(windowTextId);
msg.addItem(itemId, 1);
msg.add<uint16_t>(text.size());
msg.addString(text);
msg.add<uint16_t>(0x00);
msg.add<uint16_t>(0x00);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendHouseWindow(uint32_t windowTextId, const std::string& text)
{
NetworkMessage msg;
msg.addByte(0x97);
msg.addByte(0x00);
msg.add<uint32_t>(windowTextId);
msg.addString(text);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendOutfitWindow()
{
NetworkMessage msg;
msg.addByte(0xC8);
Outfit_t currentOutfit = player->getDefaultOutfit();
Mount* currentMount = g_game.mounts.getMountByID(player->getCurrentMount());
if (currentMount) {
currentOutfit.lookMount = currentMount->clientId;
}
AddOutfit(msg, currentOutfit);
std::vector<ProtocolOutfit> protocolOutfits;
if (player->isAccessPlayer()) {
static const std::string gamemasterOutfitName = "Gamemaster";
protocolOutfits.emplace_back(gamemasterOutfitName, 75, 0);
}
const auto& outfits = Outfits::getInstance().getOutfits(player->getSex());
protocolOutfits.reserve(outfits.size());
for (const Outfit& outfit : outfits) {
uint8_t addons;
if (!player->getOutfitAddons(outfit, addons)) {
continue;
}
protocolOutfits.emplace_back(outfit.name, outfit.lookType, addons);
if (protocolOutfits.size() == 100) { // Game client doesn't allow more than 100 outfits
break;
}
}
msg.addByte(protocolOutfits.size());
for (const ProtocolOutfit& outfit : protocolOutfits) {
msg.add<uint16_t>(outfit.lookType);
msg.addString(outfit.name);
msg.addByte(outfit.addons);
}
std::vector<const Mount*> mounts;
for (const Mount& mount : g_game.mounts.getMounts()) {
if (player->hasMount(&mount)) {
mounts.push_back(&mount);
}
}
msg.addByte(mounts.size());
for (const Mount* mount : mounts) {
msg.add<uint16_t>(mount->clientId);
msg.addString(mount->name);
}
writeToOutputBuffer(msg);
}
void ProtocolGame::sendUpdatedVIPStatus(uint32_t guid, VipStatus_t newStatus)
{
NetworkMessage msg;
msg.addByte(0xD3);
msg.add<uint32_t>(guid);
msg.addByte(newStatus);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendVIP(uint32_t guid, const std::string& name, const std::string& description, uint32_t icon, bool notify, VipStatus_t status)
{
NetworkMessage msg;
msg.addByte(0xD2);
msg.add<uint32_t>(guid);
msg.addString(name);
msg.addString(description);
msg.add<uint32_t>(std::min<uint32_t>(10, icon));
msg.addByte(notify ? 0x01 : 0x00);
msg.addByte(status);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendVIPEntries()
{
const std::forward_list<VIPEntry>& vipEntries = IOLoginData::getVIPEntries(player->getAccount());
for (const VIPEntry& entry : vipEntries) {
VipStatus_t vipStatus = VIPSTATUS_ONLINE;
Player* vipPlayer = g_game.getPlayerByGUID(entry.guid);
if (!vipPlayer || vipPlayer->isInGhostMode() || player->isAccessPlayer()) {
vipStatus = VIPSTATUS_OFFLINE;
}
sendVIP(entry.guid, entry.name, entry.description, entry.icon, entry.notify, vipStatus);
}
}
void ProtocolGame::sendSpellCooldown(uint8_t spellId, uint32_t time)
{
NetworkMessage msg;
msg.addByte(0xA4);
msg.addByte(spellId);
msg.add<uint32_t>(time);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendSpellGroupCooldown(SpellGroup_t groupId, uint32_t time)
{
NetworkMessage msg;
msg.addByte(0xA5);
msg.addByte(groupId);
msg.add<uint32_t>(time);
writeToOutputBuffer(msg);
}
void ProtocolGame::sendModalWindow(const ModalWindow& modalWindow)
{
NetworkMessage msg;
msg.addByte(0xFA);
msg.add<uint32_t>(modalWindow.id);
msg.addString(modalWindow.title);
msg.addString(modalWindow.message);
msg.addByte(modalWindow.buttons.size());
for (const auto& it : modalWindow.buttons) {
msg.addString(it.first);
msg.addByte(it.second);
}
msg.addByte(modalWindow.choices.size());
for (const auto& it : modalWindow.choices) {
msg.addString(it.first);
msg.addByte(it.second);
}
msg.addByte(modalWindow.defaultEscapeButton);
msg.addByte(modalWindow.defaultEnterButton);
msg.addByte(modalWindow.priority ? 0x01 : 0x00);
writeToOutputBuffer(msg);
}
////////////// Add common messages
void ProtocolGame::AddCreature(NetworkMessage& msg, const Creature* creature, bool known, uint32_t remove)
{
CreatureType_t creatureType = creature->getType();
const Player* otherPlayer = creature->getPlayer();
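// 0x62 = creature already known to the client; 0x61 = new creature, where 'remove' names the cached creature id the client should drop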
if (known) {
msg.add<uint16_t>(0x62);
msg.add<uint32_t>(creature->getID());
} else {
msg.add<uint16_t>(0x61);
msg.add<uint32_t>(remove);
msg.add<uint32_t>(creature->getID());
msg.addByte(creatureType);
msg.addString(creature->getName());
}
if (creature->isHealthHidden()) {
msg.addByte(0x00);
} else {
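// health is sent as a percentage of maximum health; max(maxHealth, 1) guards against division by zero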
msg.addByte(std::ceil((static_cast<double>(creature->getHealth()) / std::max<int32_t>(creature->getMaxHealth(), 1)) * 100));
}
msg.addByte(creature->getDirection());
if (!creature->isInGhostMode() && !creature->isInvisible()) {
AddOutfit(msg, creature->getCurrentOutfit());
} else {
static Outfit_t outfit;
AddOutfit(msg, outfit);
}
LightInfo lightInfo = creature->getCreatureLight();
msg.addByte(player->isAccessPlayer() ? 0xFF : lightInfo.level);
msg.addByte(lightInfo.color);
msg.add<uint16_t>(creature->getStepSpeed() / 2);
msg.addByte(player->getSkullClient(creature));
msg.addByte(player->getPartyShield(otherPlayer));
if (!known) {
msg.addByte(player->getGuildEmblem(otherPlayer));
}
if (creatureType == CREATURETYPE_MONSTER) {
const Creature* master = creature->getMaster();
if (master) {
const Player* masterPlayer = master->getPlayer();
if (masterPlayer) {
if (masterPlayer == player) {
creatureType = CREATURETYPE_SUMMON_OWN;
} else {
creatureType = CREATURETYPE_SUMMON_OTHERS;
}
}
}
}
msg.addByte(creatureType); // Type (for summons)
msg.addByte(creature->getSpeechBubble());
msg.addByte(0xFF); // MARK_UNMARKED
if (otherPlayer) {
msg.add<uint16_t>(otherPlayer->getHelpers());
} else {
msg.add<uint16_t>(0x00);
}
msg.addByte(player->canWalkthroughEx(creature) ? 0x00 : 0x01);
}
void ProtocolGame::AddPlayerStats(NetworkMessage& msg)
{
msg.addByte(0xA0);
msg.add<uint16_t>(std::min<int32_t>(player->getHealth(), std::numeric_limits<uint16_t>::max()));
msg.add<uint16_t>(std::min<int32_t>(player->getMaxHealth(), std::numeric_limits<uint16_t>::max()));
msg.add<uint32_t>(player->getFreeCapacity());
msg.add<uint32_t>(player->getCapacity());
msg.add<uint64_t>(player->getExperience());
msg.add<uint16_t>(player->getLevel());
msg.addByte(player->getLevelPercent());
msg.add<uint16_t>(100); // base xp gain rate
msg.add<uint16_t>(0); // xp voucher
msg.add<uint16_t>(0); // low level bonus
msg.add<uint16_t>(0); // xp boost
msg.add<uint16_t>(100); // stamina multiplier (100 = x1.0)
msg.add<uint16_t>(std::min<int32_t>(player->getMana(), std::numeric_limits<uint16_t>::max()));
msg.add<uint16_t>(std::min<int32_t>(player->getMaxMana(), std::numeric_limits<uint16_t>::max()));
msg.addByte(std::min<uint32_t>(player->getMagicLevel(), std::numeric_limits<uint8_t>::max()));
msg.addByte(std::min<uint32_t>(player->getBaseMagicLevel(), std::numeric_limits<uint8_t>::max()));
msg.addByte(player->getMagicLevelPercent());
msg.addByte(player->getSoul());
msg.add<uint16_t>(player->getStaminaMinutes());
msg.add<uint16_t>(player->getBaseSpeed() / 2);
Condition* condition = player->getCondition(CONDITION_REGENERATION);
msg.add<uint16_t>(condition ? condition->getTicks() / 1000 : 0x00);
msg.add<uint16_t>(player->getOfflineTrainingTime() / 60 / 1000);
msg.add<uint16_t>(0); // xp boost time (seconds)
msg.addByte(0); // enables exp boost in the store
}
void ProtocolGame::AddPlayerSkills(NetworkMessage& msg)
{
msg.addByte(0xA1);
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; ++i) {
msg.add<uint16_t>(std::min<int32_t>(player->getSkillLevel(i), std::numeric_limits<uint16_t>::max()));
msg.add<uint16_t>(player->getBaseSkill(i));
msg.addByte(player->getSkillPercent(i));
}
for (uint8_t i = SPECIALSKILL_FIRST; i <= SPECIALSKILL_LAST; ++i) {
msg.add<uint16_t>(std::min<int32_t>(100, player->varSpecialSkills[i]));
msg.add<uint16_t>(0);
}
}
void ProtocolGame::AddOutfit(NetworkMessage& msg, const Outfit_t& outfit)
{
msg.add<uint16_t>(outfit.lookType);
if (outfit.lookType != 0) {
msg.addByte(outfit.lookHead);
msg.addByte(outfit.lookBody);
msg.addByte(outfit.lookLegs);
msg.addByte(outfit.lookFeet);
msg.addByte(outfit.lookAddons);
} else {
msg.addItemId(outfit.lookTypeEx);
}
msg.add<uint16_t>(outfit.lookMount);
}
void ProtocolGame::AddWorldLight(NetworkMessage& msg, LightInfo lightInfo)
{
msg.addByte(0x82);
msg.addByte((player->isAccessPlayer() ? 0xFF : lightInfo.level));
msg.addByte(lightInfo.color);
}
void ProtocolGame::AddCreatureLight(NetworkMessage& msg, const Creature* creature)
{
LightInfo lightInfo = creature->getCreatureLight();
msg.addByte(0x8D);
msg.add<uint32_t>(creature->getID());
msg.addByte((player->isAccessPlayer() ? 0xFF : lightInfo.level));
msg.addByte(lightInfo.color);
}
//tile
void ProtocolGame::RemoveTileThing(NetworkMessage& msg, const Position& pos, uint32_t stackpos)
{
if (stackpos >= 10) {
return;
}
msg.addByte(0x6C);
msg.addPosition(pos);
msg.addByte(stackpos);
}
void ProtocolGame::MoveUpCreature(NetworkMessage& msg, const Creature* creature, const Position& newPos, const Position& oldPos)
{
if (creature != player) {
return;
}
//floor change up
msg.addByte(0xBE);
//going to surface
if (newPos.z == 7) {
int32_t skip = -1;
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 5, 18, 14, 3, skip); //(floor 7 and 6 already set)
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 4, 18, 14, 4, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 3, 18, 14, 5, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 2, 18, 14, 6, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 1, 18, 14, 7, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, 0, 18, 14, 8, skip);
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//underground, going one floor up (still underground)
else if (newPos.z > 7) {
int32_t skip = -1;
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, oldPos.getZ() - 3, 18, 14, 3, skip);
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//moving up a floor makes us out of sync
//west
msg.addByte(0x68);
GetMapDescription(oldPos.x - 8, oldPos.y - 5, newPos.z, 1, 14, msg);
//north
msg.addByte(0x65);
GetMapDescription(oldPos.x - 8, oldPos.y - 6, newPos.z, 18, 1, msg);
}
void ProtocolGame::MoveDownCreature(NetworkMessage& msg, const Creature* creature, const Position& newPos, const Position& oldPos)
{
if (creature != player) {
return;
}
//floor change down
msg.addByte(0xBF);
//going from surface to underground
if (newPos.z == 8) {
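// floors 6 and 7 were already visible from the surface, so only the newly visible floors newPos.z .. newPos.z + 2 are described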
int32_t skip = -1;
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, newPos.z, 18, 14, -1, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, newPos.z + 1, 18, 14, -2, skip);
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, newPos.z + 2, 18, 14, -3, skip);
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//going further down
else if (newPos.z > oldPos.z && newPos.z > 8 && newPos.z < 14) {
int32_t skip = -1;
GetFloorDescription(msg, oldPos.x - 8, oldPos.y - 6, newPos.z + 2, 18, 14, -3, skip);
if (skip >= 0) {
msg.addByte(skip);
msg.addByte(0xFF);
}
}
//moving down a floor makes us out of sync
//east
msg.addByte(0x66);
GetMapDescription(oldPos.x + 9, oldPos.y - 7, newPos.z, 1, 14, msg);
//south
msg.addByte(0x67);
GetMapDescription(oldPos.x - 8, oldPos.y + 7, newPos.z, 18, 1, msg);
}
void ProtocolGame::AddShopItem(NetworkMessage& msg, const ShopInfo& item)
{
const ItemType& it = Item::items[item.itemId];
msg.add<uint16_t>(it.clientId);
if (it.isSplash() || it.isFluidContainer()) {
msg.addByte(serverFluidToClient(item.subType));
} else {
msg.addByte(0x00);
}
msg.addString(item.realName);
msg.add<uint32_t>(it.weight);
msg.add<uint32_t>(item.buyPrice);
msg.add<uint32_t>(item.sellPrice);
}
void ProtocolGame::parseExtendedOpcode(NetworkMessage& msg)
{
uint8_t opcode = msg.getByte();
const std::string& buffer = msg.getString();
// process additional opcodes via lua script event
addGameTask(&Game::parsePlayerExtendedOpcode, player->getID(), opcode, buffer);
}
| 1 | 15,801 | You should use C++-type casts instead. | otland-forgottenserver | cpp |
@@ -865,7 +865,12 @@ func (d *cassandraPersistence) UpdateShard(
}
func (d *cassandraPersistence) CreateWorkflowExecution(
request *p.InternalCreateWorkflowExecutionRequest,
-) (*p.CreateWorkflowExecutionResponse, error) {
+) (*p.InternalCreateWorkflowExecutionResponse, error) {
+ for _, req := range request.NewWorkflowNewEvents {
+ if err := d.AppendHistoryNodes(req); err != nil {
+ return nil, err
+ }
+ }
batch := d.session.NewBatch(gocql.LoggedBatch)
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cassandra
import (
"fmt"
"strings"
"time"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"
enumsspb "go.temporal.io/server/api/enums/v1"
persistencespb "go.temporal.io/server/api/persistence/v1"
"go.temporal.io/server/common/convert"
"go.temporal.io/server/common/log"
p "go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/persistence/nosql/nosqlplugin/cassandra/gocql"
"go.temporal.io/server/common/persistence/serialization"
"go.temporal.io/server/common/primitives/timestamp"
)
// "go.temporal.io/api/serviceerror"
// Guidelines for creating new special UUID constants
// Each UUID should be of the form: E0000000-R000-f000-f000-00000000000x
// Where x is any hexadecimal value, E represents the entity type valid values are:
// E = {NamespaceID = 1, WorkflowID = 2, RunID = 3}
// R represents row type in executions table, valid values are:
// R = {Shard = 1, Execution = 2, Transfer = 3, Timer = 4, Replication = 5}
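// Example: rowTypeTransferNamespaceID = "10000000-3000-f000-f000-000000000000" decodes as E = 1 (NamespaceID) and R = 3 (Transfer)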
const (
// Special Namespaces related constants
emptyNamespaceID = "10000000-0000-f000-f000-000000000000"
// Special Run IDs
emptyRunID = "30000000-0000-f000-f000-000000000000"
permanentRunID = "30000000-0000-f000-f000-000000000001"
// Row Constants for Shard Row
rowTypeShardNamespaceID = "10000000-1000-f000-f000-000000000000"
rowTypeShardWorkflowID = "20000000-1000-f000-f000-000000000000"
rowTypeShardRunID = "30000000-1000-f000-f000-000000000000"
// Row Constants for Transfer Task Row
rowTypeTransferNamespaceID = "10000000-3000-f000-f000-000000000000"
rowTypeTransferWorkflowID = "20000000-3000-f000-f000-000000000000"
rowTypeTransferRunID = "30000000-3000-f000-f000-000000000000"
// Row Constants for Timer Task Row
rowTypeTimerNamespaceID = "10000000-4000-f000-f000-000000000000"
rowTypeTimerWorkflowID = "20000000-4000-f000-f000-000000000000"
rowTypeTimerRunID = "30000000-4000-f000-f000-000000000000"
// Row Constants for Replication Task Row
rowTypeReplicationNamespaceID = "10000000-5000-f000-f000-000000000000"
rowTypeReplicationWorkflowID = "20000000-5000-f000-f000-000000000000"
rowTypeReplicationRunID = "30000000-5000-f000-f000-000000000000"
// Row constants for visibility task row.
rowTypeVisibilityTaskNamespaceID = "10000000-6000-f000-f000-000000000000"
rowTypeVisibilityTaskWorkflowID = "20000000-6000-f000-f000-000000000000"
rowTypeVisibilityTaskRunID = "30000000-6000-f000-f000-000000000000"
// Row Constants for Replication Task DLQ Row. Source cluster name will be used as WorkflowID.
rowTypeDLQNamespaceID = "10000000-6000-f000-f000-000000000000"
rowTypeDLQRunID = "30000000-6000-f000-f000-000000000000"
// Special TaskId constants
rowTypeExecutionTaskID = int64(-10)
rowTypeShardTaskID = int64(-11)
emptyInitiatedID = int64(-7)
)
const (
// Row types for table executions
rowTypeShard = iota
rowTypeExecution
rowTypeTransferTask
rowTypeTimerTask
rowTypeReplicationTask
rowTypeDLQ
rowTypeVisibilityTask
)
const (
// Row types for table tasks
rowTypeTask = iota
rowTypeTaskQueue
)
const (
taskQueueTaskID = -12345
// ref: https://docs.datastax.com/en/dse-trblshoot/doc/troubleshooting/recoveringTtlYear2038Problem.html
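// 315360000 seconds = 10 * 365 * 24 * 60 * 60, i.e. a 10 year TTL (ignoring leap days)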
maxCassandraTTL = int64(315360000) // Cassandra's maximum supported timestamp is 2038-01-19T03:14:06+00:00; the TTL is capped at 10 years, which keeps writes valid until year 2028
)
const (
templateCreateShardQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, visibility_ts, task_id, shard, shard_encoding, range_id)` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?) IF NOT EXISTS`
templateGetShardQuery = `SELECT shard, shard_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateUpdateShardQuery = `UPDATE executions ` +
`SET shard = ?, shard_encoding = ?, range_id = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? ` +
`IF range_id = ?`
templateUpdateCurrentWorkflowExecutionQuery = `UPDATE executions USING TTL 0 ` +
`SET current_run_id = ?, execution_state = ?, execution_state_encoding = ?, workflow_last_write_version = ?, workflow_state = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? ` +
`IF current_run_id = ? `
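// The following query extends templateUpdateCurrentWorkflowExecutionQuery with CAS conditions on the previous run's last write version and state; it is used when creating a workflow in workflow ID reuse mode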
templateUpdateCurrentWorkflowExecutionForNewQuery = templateUpdateCurrentWorkflowExecutionQuery +
`and workflow_last_write_version = ? ` +
`and workflow_state = ? `
templateCreateCurrentWorkflowExecutionQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, ` +
`visibility_ts, task_id, current_run_id, execution_state, execution_state_encoding, ` +
`workflow_last_write_version, workflow_state) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) IF NOT EXISTS USING TTL 0 `
templateCreateWorkflowExecutionQuery = `INSERT INTO executions (` +
`shard_id, namespace_id, workflow_id, run_id, type, ` +
`execution, execution_encoding, execution_state, execution_state_encoding, next_event_id, db_record_version, ` +
`visibility_ts, task_id, checksum, checksum_encoding) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) IF NOT EXISTS `
templateCreateTransferTaskQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, transfer, transfer_encoding, visibility_ts, task_id) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)`
templateCreateReplicationTaskQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, replication, replication_encoding, visibility_ts, task_id) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)`
templateCreateVisibilityTaskQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, visibility_task_data, visibility_task_encoding, visibility_ts, task_id) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)`
templateCreateTimerTaskQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, timer, timer_encoding, visibility_ts, task_id) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)`
templateUpdateLeaseQuery = `UPDATE executions ` +
`SET range_id = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? ` +
`IF range_id = ?`
templateGetWorkflowExecutionQuery = `SELECT execution, execution_encoding, execution_state, execution_state_encoding, next_event_id, activity_map, activity_map_encoding, timer_map, timer_map_encoding, ` +
`child_executions_map, child_executions_map_encoding, request_cancel_map, request_cancel_map_encoding, signal_map, signal_map_encoding, signal_requested, buffered_events_list, ` +
`checksum, checksum_encoding, db_record_version ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateGetCurrentExecutionQuery = `SELECT current_run_id, execution, execution_encoding, execution_state, execution_state_encoding, workflow_last_write_version ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateListWorkflowExecutionQuery = `SELECT run_id, execution, execution_encoding, execution_state, execution_state_encoding, next_event_id ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ?`
// TODO deprecate templateUpdateWorkflowExecutionQueryDeprecated in favor of templateUpdateWorkflowExecutionQuery
// Deprecated.
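// The deprecated query does CAS on next_event_id, while templateUpdateWorkflowExecutionQuery below does CAS on db_record_version.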
templateUpdateWorkflowExecutionQueryDeprecated = `UPDATE executions ` +
`SET execution = ? ` +
`, execution_encoding = ? ` +
`, execution_state = ? ` +
`, execution_state_encoding = ? ` +
`, next_event_id = ? ` +
`, db_record_version = ? ` +
`, checksum = ? ` +
`, checksum_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? ` +
`IF next_event_id = ? `
templateUpdateWorkflowExecutionQuery = `UPDATE executions ` +
`SET execution = ? ` +
`, execution_encoding = ? ` +
`, execution_state = ? ` +
`, execution_state_encoding = ? ` +
`, next_event_id = ? ` +
`, db_record_version = ? ` +
`, checksum = ? ` +
`, checksum_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? ` +
`IF db_record_version = ? `
templateUpdateActivityInfoQuery = `UPDATE executions ` +
`SET activity_map[ ? ] = ?, activity_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetActivityInfoQuery = `UPDATE executions ` +
`SET activity_map = ?, activity_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateUpdateTimerInfoQuery = `UPDATE executions ` +
`SET timer_map[ ? ] = ?, timer_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetTimerInfoQuery = `UPDATE executions ` +
`SET timer_map = ?, timer_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateUpdateChildExecutionInfoQuery = `UPDATE executions ` +
`SET child_executions_map[ ? ] = ?, child_executions_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetChildExecutionInfoQuery = `UPDATE executions ` +
`SET child_executions_map = ?, child_executions_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateUpdateRequestCancelInfoQuery = `UPDATE executions ` +
`SET request_cancel_map[ ? ] = ?, request_cancel_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetRequestCancelInfoQuery = `UPDATE executions ` +
`SET request_cancel_map = ?, request_cancel_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateUpdateSignalInfoQuery = `UPDATE executions ` +
`SET signal_map[ ? ] = ?, signal_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetSignalInfoQuery = `UPDATE executions ` +
`SET signal_map = ?, signal_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateUpdateSignalRequestedQuery = `UPDATE executions ` +
`SET signal_requested = signal_requested + ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetSignalRequestedQuery = `UPDATE executions ` +
`SET signal_requested = ?` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateAppendBufferedEventsQuery = `UPDATE executions ` +
`SET buffered_events_list = buffered_events_list + ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteBufferedEventsQuery = `UPDATE executions ` +
`SET buffered_events_list = [] ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteActivityInfoQuery = `DELETE activity_map[ ? ] ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteTimerInfoQuery = `DELETE timer_map[ ? ] ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteChildExecutionInfoQuery = `DELETE child_executions_map[ ? ] ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteRequestCancelInfoQuery = `DELETE request_cancel_map[ ? ] ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteSignalInfoQuery = `DELETE signal_map[ ? ] ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteWorkflowExecutionMutableStateQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteWorkflowExecutionCurrentRowQuery = templateDeleteWorkflowExecutionMutableStateQuery + " if current_run_id = ? "
templateDeleteWorkflowExecutionSignalRequestedQuery = `UPDATE executions ` +
`SET signal_requested = signal_requested - ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateGetTransferTaskQuery = `SELECT transfer, transfer_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateGetTransferTasksQuery = `SELECT transfer, transfer_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateGetVisibilityTaskQuery = `SELECT visibility_task_data, visibility_task_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateGetVisibilityTasksQuery = `SELECT visibility_task_data, visibility_task_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateGetReplicationTaskQuery = `SELECT replication, replication_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateGetReplicationTasksQuery = `SELECT replication, replication_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateCompleteTransferTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateRangeCompleteTransferTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateCompleteVisibilityTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateRangeCompleteVisibilityTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateCompleteReplicationTaskBeforeQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id <= ?`
templateCompleteReplicationTaskQuery = templateCompleteTransferTaskQuery
templateRangeCompleteReplicationTaskQuery = templateRangeCompleteTransferTaskQuery
templateGetTimerTaskQuery = `SELECT timer, timer_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateGetTimerTasksQuery = `SELECT timer, timer_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ?` +
`and namespace_id = ? ` +
`and workflow_id = ?` +
`and run_id = ?` +
`and visibility_ts >= ? ` +
`and visibility_ts < ?`
templateCompleteTimerTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ?` +
`and run_id = ?` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateRangeCompleteTimerTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ?` +
`and run_id = ?` +
`and visibility_ts >= ? ` +
`and visibility_ts < ?`
templateCreateTaskQuery = `INSERT INTO tasks (` +
`namespace_id, task_queue_name, task_queue_type, type, task_id, task, task_encoding) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?)`
templateCreateTaskWithTTLQuery = `INSERT INTO tasks (` +
`namespace_id, task_queue_name, task_queue_type, type, task_id, task, task_encoding) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?) USING TTL ?`
templateGetTasksQuery = `SELECT task_id, task, task_encoding ` +
`FROM tasks ` +
`WHERE namespace_id = ? ` +
`and task_queue_name = ? ` +
`and task_queue_type = ? ` +
`and type = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateCompleteTaskQuery = `DELETE FROM tasks ` +
`WHERE namespace_id = ? ` +
`and task_queue_name = ? ` +
`and task_queue_type = ? ` +
`and type = ? ` +
`and task_id = ?`
templateCompleteTasksLessThanQuery = `DELETE FROM tasks ` +
`WHERE namespace_id = ? ` +
`AND task_queue_name = ? ` +
`AND task_queue_type = ? ` +
`AND type = ? ` +
`AND task_id <= ? `
templateGetTaskQueue = `SELECT ` +
`range_id, ` +
`task_queue, ` +
`task_queue_encoding ` +
`FROM tasks ` +
`WHERE namespace_id = ? ` +
`and task_queue_name = ? ` +
`and task_queue_type = ? ` +
`and type = ? ` +
`and task_id = ?`
templateInsertTaskQueueQuery = `INSERT INTO tasks (` +
`namespace_id, ` +
`task_queue_name, ` +
`task_queue_type, ` +
`type, ` +
`task_id, ` +
`range_id, ` +
`task_queue, ` +
`task_queue_encoding ` +
`) VALUES (?, ?, ?, ?, ?, ?, ?, ?) IF NOT EXISTS`
templateUpdateTaskQueueQuery = `UPDATE tasks SET ` +
`range_id = ?, ` +
`task_queue = ?, ` +
`task_queue_encoding = ? ` +
`WHERE namespace_id = ? ` +
`and task_queue_name = ? ` +
`and task_queue_type = ? ` +
`and type = ? ` +
`and task_id = ? ` +
`IF range_id = ?`
templateUpdateTaskQueueQueryWithTTLPart1 = `INSERT INTO tasks (` +
`namespace_id, ` +
`task_queue_name, ` +
`task_queue_type, ` +
`type, ` +
`task_id ` +
`) VALUES (?, ?, ?, ?, ?) USING TTL ?`
templateUpdateTaskQueueQueryWithTTLPart2 = `UPDATE tasks USING TTL ? SET ` +
`range_id = ?, ` +
`task_queue = ?, ` +
`task_queue_encoding = ? ` +
`WHERE namespace_id = ? ` +
`and task_queue_name = ? ` +
`and task_queue_type = ? ` +
`and type = ? ` +
`and task_id = ? ` +
`IF range_id = ?`
templateDeleteTaskQueueQuery = `DELETE FROM tasks ` +
`WHERE namespace_id = ? ` +
`AND task_queue_name = ? ` +
`AND task_queue_type = ? ` +
`AND type = ? ` +
`AND task_id = ? ` +
`IF range_id = ?`
)
var (
defaultDateTime = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
defaultVisibilityTimestamp = p.UnixMilliseconds(defaultDateTime)
)
type (
cassandraStore struct {
session gocql.Session
logger log.Logger
}
// Implements ExecutionManager, ShardManager and TaskManager
cassandraPersistence struct {
cassandraStore
currentClusterName string
}
)
var _ p.ExecutionStore = (*cassandraPersistence)(nil)
// newShardPersistence creates an instance of the ShardManager implementation
func newShardPersistence(
session gocql.Session,
clusterName string,
logger log.Logger,
) (p.ShardStore, error) {
return &cassandraPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
currentClusterName: clusterName,
}, nil
}
// NewExecutionStore creates an instance of the workflowExecutionManager implementation
func NewExecutionStore(
session gocql.Session,
logger log.Logger,
) p.ExecutionStore {
return &cassandraPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
}
}
// newTaskPersistence creates an instance of the TaskManager implementation
func newTaskPersistence(
session gocql.Session,
logger log.Logger,
) (p.TaskStore, error) {
return &cassandraPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
}, nil
}
func (d *cassandraStore) GetName() string {
return cassandraPersistenceName
}
// Close releases the underlying resources held by this object
func (d *cassandraStore) Close() {
if d.session != nil {
d.session.Close()
}
}
func (d *cassandraPersistence) GetClusterName() string {
return d.currentClusterName
}
func (d *cassandraPersistence) CreateShard(
request *p.InternalCreateShardRequest,
) error {
query := d.session.Query(templateCreateShardQuery,
request.ShardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.ShardInfo.Data,
request.ShardInfo.EncodingType.String(),
request.RangeID)
previous := make(map[string]interface{})
applied, err := query.MapScanCAS(previous)
if err != nil {
return gocql.ConvertError("CreateShard", err)
}
if !applied {
return &p.ShardAlreadyExistError{
Msg: fmt.Sprintf("Shard already exists in executions table. ShardId: %v.", request.ShardID),
}
}
return nil
}
func (d *cassandraPersistence) GetShard(
request *p.InternalGetShardRequest,
) (*p.InternalGetShardResponse, error) {
shardID := request.ShardID
query := d.session.Query(templateGetShardQuery,
shardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID)
var data []byte
var encoding string
if err := query.Scan(&data, &encoding); err != nil {
return nil, gocql.ConvertError("GetShard", err)
}
return &p.InternalGetShardResponse{ShardInfo: p.NewDataBlob(data, encoding)}, nil
}
func (d *cassandraPersistence) UpdateShard(
request *p.InternalUpdateShardRequest,
) error {
query := d.session.Query(templateUpdateShardQuery,
request.ShardInfo.Data,
request.ShardInfo.EncodingType.String(),
request.RangeID,
request.ShardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.PreviousRangeID) // bound to the IF range_id = ? clause
previous := make(map[string]interface{})
applied, err := query.MapScanCAS(previous)
if err != nil {
return gocql.ConvertError("UpdateShard", err)
}
if !applied {
var columns []string
for k, v := range previous {
columns = append(columns, fmt.Sprintf("%s=%v", k, v))
}
return &p.ShardOwnershipLostError{
ShardID: request.ShardID,
Msg: fmt.Sprintf("Failed to update shard. previous_range_id: %v, columns: (%v)",
request.PreviousRangeID, strings.Join(columns, ",")),
}
}
return nil
}
func (d *cassandraPersistence) CreateWorkflowExecution(
request *p.InternalCreateWorkflowExecutionRequest,
) (*p.CreateWorkflowExecutionResponse, error) {
batch := d.session.NewBatch(gocql.LoggedBatch)
shardID := request.ShardID
newWorkflow := request.NewWorkflowSnapshot
lastWriteVersion := newWorkflow.LastWriteVersion
namespaceID := newWorkflow.NamespaceID
workflowID := newWorkflow.WorkflowID
runID := newWorkflow.RunID
var requestCurrentRunID string
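// Depending on the create mode, the current-execution row is left untouched (zombie), CAS-updated against the previous run (continue-as-new / workflow ID reuse), or inserted if absent (brand new).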
switch request.Mode {
case p.CreateWorkflowModeZombie:
// noop
case p.CreateWorkflowModeContinueAsNew:
batch.Query(templateUpdateCurrentWorkflowExecutionQuery,
runID,
newWorkflow.ExecutionStateBlob.Data,
newWorkflow.ExecutionStateBlob.EncodingType.String(),
lastWriteVersion,
newWorkflow.ExecutionState.State,
shardID,
rowTypeExecution,
namespaceID,
workflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
request.PreviousRunID,
)
requestCurrentRunID = request.PreviousRunID
case p.CreateWorkflowModeWorkflowIDReuse:
batch.Query(templateUpdateCurrentWorkflowExecutionForNewQuery,
runID,
newWorkflow.ExecutionStateBlob.Data,
newWorkflow.ExecutionStateBlob.EncodingType.String(),
lastWriteVersion,
newWorkflow.ExecutionState.State,
shardID,
rowTypeExecution,
namespaceID,
workflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
request.PreviousRunID,
request.PreviousLastWriteVersion,
enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED,
)
requestCurrentRunID = request.PreviousRunID
case p.CreateWorkflowModeBrandNew:
batch.Query(templateCreateCurrentWorkflowExecutionQuery,
shardID,
rowTypeExecution,
namespaceID,
workflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
runID,
newWorkflow.ExecutionStateBlob.Data,
newWorkflow.ExecutionStateBlob.EncodingType.String(),
lastWriteVersion,
newWorkflow.ExecutionState.State,
)
requestCurrentRunID = ""
default:
return nil, serviceerror.NewInternal(fmt.Sprintf("unknown mode: %v", request.Mode))
}
if err := applyWorkflowSnapshotBatchAsNew(batch,
request.ShardID,
&newWorkflow,
); err != nil {
return nil, err
}
batch.Query(templateUpdateLeaseQuery,
request.RangeID,
request.ShardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.RangeID,
)
record := make(map[string]interface{})
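// The batch executes as a lightweight transaction: applied is false when any IF condition fails, and record/iter carry the conflicting rows used to build the error below.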
applied, iter, err := d.session.MapExecuteBatchCAS(batch, record)
if err != nil {
return nil, gocql.ConvertError("CreateWorkflowExecution", err)
}
defer func() {
_ = iter.Close()
}()
if !applied {
return nil, convertErrors(
record,
iter,
shardID,
request.RangeID,
requestCurrentRunID,
[]executionCASCondition{{
runID: newWorkflow.ExecutionState.RunId,
// dbVersion is for CAS, so the db record version will be set to `newWorkflow.DBRecordVersion`
// while CAS on `newWorkflow.DBRecordVersion - 1`
dbVersion: newWorkflow.DBRecordVersion - 1,
nextEventID: newWorkflow.Condition,
}},
)
}
return &p.CreateWorkflowExecutionResponse{}, nil
}
func (d *cassandraPersistence) GetWorkflowExecution(
request *p.GetWorkflowExecutionRequest,
) (*p.InternalGetWorkflowExecutionResponse, error) {
execution := request.Execution
query := d.session.Query(templateGetWorkflowExecutionQuery,
request.ShardID,
rowTypeExecution,
request.NamespaceID,
execution.WorkflowId,
execution.GetRunId(),
defaultVisibilityTimestamp,
rowTypeExecutionTaskID)
result := make(map[string]interface{})
if err := query.MapScan(result); err != nil {
return nil, gocql.ConvertError("GetWorkflowExecution", err)
}
state, err := mutableStateFromRow(result)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("GetWorkflowExecution operation failed. Error: %v", err))
}
activityInfos := make(map[int64]*commonpb.DataBlob)
aMap := result["activity_map"].(map[int64][]byte)
aMapEncoding := result["activity_map_encoding"].(string)
for key, value := range aMap {
activityInfos[key] = p.NewDataBlob(value, aMapEncoding)
}
state.ActivityInfos = activityInfos
timerInfos := make(map[string]*commonpb.DataBlob)
tMapEncoding := result["timer_map_encoding"].(string)
tMap := result["timer_map"].(map[string][]byte)
for key, value := range tMap {
timerInfos[key] = p.NewDataBlob(value, tMapEncoding)
}
state.TimerInfos = timerInfos
childExecutionInfos := make(map[int64]*commonpb.DataBlob)
cMap := result["child_executions_map"].(map[int64][]byte)
cMapEncoding := result["child_executions_map_encoding"].(string)
for key, value := range cMap {
childExecutionInfos[key] = p.NewDataBlob(value, cMapEncoding)
}
state.ChildExecutionInfos = childExecutionInfos
requestCancelInfos := make(map[int64]*commonpb.DataBlob)
rMapEncoding := result["request_cancel_map_encoding"].(string)
rMap := result["request_cancel_map"].(map[int64][]byte)
for key, value := range rMap {
requestCancelInfos[key] = p.NewDataBlob(value, rMapEncoding)
}
state.RequestCancelInfos = requestCancelInfos
signalInfos := make(map[int64]*commonpb.DataBlob)
sMapEncoding := result["signal_map_encoding"].(string)
sMap := result["signal_map"].(map[int64][]byte)
for key, value := range sMap {
signalInfos[key] = p.NewDataBlob(value, sMapEncoding)
}
state.SignalInfos = signalInfos
state.SignalRequestedIDs = gocql.UUIDsToStrings(result["signal_requested"])
eList := result["buffered_events_list"].([]map[string]interface{})
bufferedEventsBlobs := make([]*commonpb.DataBlob, 0, len(eList))
for _, v := range eList {
blob := createHistoryEventBatchBlob(v)
bufferedEventsBlobs = append(bufferedEventsBlobs, blob)
}
state.BufferedEvents = bufferedEventsBlobs
state.Checksum = p.NewDataBlob(result["checksum"].([]byte), result["checksum_encoding"].(string))
dbVersion := int64(0)
if dbRecordVersion, ok := result["db_record_version"]; ok {
dbVersion = dbRecordVersion.(int64)
}
return &p.InternalGetWorkflowExecutionResponse{
State: state,
DBRecordVersion: dbVersion,
}, nil
}
func (d *cassandraPersistence) UpdateWorkflowExecution(
request *p.InternalUpdateWorkflowExecutionRequest,
) error {
// first append history events
for _, req := range request.UpdateWorkflowNewEvents {
if err := d.AppendHistoryNodes(req); err != nil {
return err
}
}
for _, req := range request.NewWorkflowNewEvents {
if err := d.AppendHistoryNodes(req); err != nil {
return err
}
}
// then update mutable state
batch := d.session.NewBatch(gocql.LoggedBatch)
updateWorkflow := request.UpdateWorkflowMutation
newWorkflow := request.NewWorkflowSnapshot
namespaceID := updateWorkflow.NamespaceID
workflowID := updateWorkflow.WorkflowID
runID := updateWorkflow.RunID
shardID := request.ShardID
switch request.Mode {
case p.UpdateWorkflowModeBypassCurrent:
if err := d.assertNotCurrentExecution(
request.ShardID,
namespaceID,
workflowID,
runID); err != nil {
return err
}
case p.UpdateWorkflowModeUpdateCurrent:
if newWorkflow != nil {
newLastWriteVersion := newWorkflow.LastWriteVersion
newNamespaceID := newWorkflow.NamespaceID
newWorkflowID := newWorkflow.WorkflowID
newRunID := newWorkflow.RunID
if namespaceID != newNamespaceID {
return serviceerror.NewInternal("UpdateWorkflowExecution: cannot continue as new to another namespace")
}
batch.Query(templateUpdateCurrentWorkflowExecutionQuery,
newRunID,
newWorkflow.ExecutionStateBlob.Data,
newWorkflow.ExecutionStateBlob.EncodingType.String(),
newLastWriteVersion,
newWorkflow.ExecutionState.State,
shardID,
rowTypeExecution,
newNamespaceID,
newWorkflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
runID,
)
} else {
lastWriteVersion := updateWorkflow.LastWriteVersion
executionStateDatablob, err := serialization.WorkflowExecutionStateToBlob(updateWorkflow.ExecutionState)
if err != nil {
return err
}
batch.Query(templateUpdateCurrentWorkflowExecutionQuery,
runID,
executionStateDatablob.Data,
executionStateDatablob.EncodingType.String(),
lastWriteVersion,
updateWorkflow.ExecutionState.State,
request.ShardID,
rowTypeExecution,
namespaceID,
workflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
runID,
)
}
default:
return serviceerror.NewInternal(fmt.Sprintf("UpdateWorkflowExecution: unknown mode: %v", request.Mode))
}
if err := applyWorkflowMutationBatch(batch, shardID, &updateWorkflow); err != nil {
return err
}
if newWorkflow != nil {
if err := applyWorkflowSnapshotBatchAsNew(batch,
request.ShardID,
newWorkflow,
); err != nil {
return err
}
}
// Verifies that the RangeID has not changed
batch.Query(templateUpdateLeaseQuery,
request.RangeID,
request.ShardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.RangeID,
)
record := make(map[string]interface{})
applied, iter, err := d.session.MapExecuteBatchCAS(batch, record)
if err != nil {
return gocql.ConvertError("UpdateWorkflowExecution", err)
}
defer func() {
_ = iter.Close()
}()
if !applied {
return convertErrors(
record,
iter,
request.ShardID,
request.RangeID,
updateWorkflow.ExecutionState.RunId,
[]executionCASCondition{{
runID: updateWorkflow.ExecutionState.RunId,
// dbVersion is for CAS, so the db record version will be set to `updateWorkflow.DBRecordVersion`
// while CAS on `updateWorkflow.DBRecordVersion - 1`
dbVersion: updateWorkflow.DBRecordVersion - 1,
nextEventID: updateWorkflow.Condition,
}},
)
}
return nil
}
func (d *cassandraPersistence) ConflictResolveWorkflowExecution(
request *p.InternalConflictResolveWorkflowExecutionRequest,
) error {
batch := d.session.NewBatch(gocql.LoggedBatch)
currentWorkflow := request.CurrentWorkflowMutation
resetWorkflow := request.ResetWorkflowSnapshot
newWorkflow := request.NewWorkflowSnapshot
shardID := request.ShardID
namespaceID := resetWorkflow.NamespaceID
workflowID := resetWorkflow.WorkflowID
var currentRunID string
switch request.Mode {
case p.ConflictResolveWorkflowModeBypassCurrent:
if err := d.assertNotCurrentExecution(
shardID,
namespaceID,
workflowID,
resetWorkflow.ExecutionState.RunId,
); err != nil {
return err
}
case p.ConflictResolveWorkflowModeUpdateCurrent:
executionState := resetWorkflow.ExecutionState
lastWriteVersion := resetWorkflow.LastWriteVersion
if newWorkflow != nil {
lastWriteVersion = newWorkflow.LastWriteVersion
executionState = newWorkflow.ExecutionState
}
runID := executionState.RunId
createRequestID := executionState.CreateRequestId
state := executionState.State
status := executionState.Status
executionStateDatablob, err := serialization.WorkflowExecutionStateToBlob(&persistencespb.WorkflowExecutionState{
RunId: runID,
CreateRequestId: createRequestID,
State: state,
Status: status,
})
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("ConflictResolveWorkflowExecution operation failed. Error: %v", err))
}
if currentWorkflow != nil {
currentRunID = currentWorkflow.ExecutionState.RunId
batch.Query(templateUpdateCurrentWorkflowExecutionQuery,
runID,
executionStateDatablob.Data,
executionStateDatablob.EncodingType.String(),
lastWriteVersion,
state,
shardID,
rowTypeExecution,
namespaceID,
workflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
currentRunID,
)
} else {
// reset workflow is current
currentRunID = resetWorkflow.ExecutionState.RunId
batch.Query(templateUpdateCurrentWorkflowExecutionQuery,
runID,
executionStateDatablob.Data,
executionStateDatablob.EncodingType.String(),
lastWriteVersion,
state,
shardID,
rowTypeExecution,
namespaceID,
workflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
currentRunID,
)
}
default:
return serviceerror.NewInternal(fmt.Sprintf("ConflictResolveWorkflowExecution: unknown mode: %v", request.Mode))
}
if err := applyWorkflowSnapshotBatchAsReset(batch, shardID, &resetWorkflow); err != nil {
return err
}
if currentWorkflow != nil {
if err := applyWorkflowMutationBatch(batch, shardID, currentWorkflow); err != nil {
return err
}
}
if newWorkflow != nil {
if err := applyWorkflowSnapshotBatchAsNew(batch, shardID, newWorkflow); err != nil {
return err
}
}
// Verifies that the RangeID has not changed
batch.Query(templateUpdateLeaseQuery,
request.RangeID,
request.ShardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.RangeID,
)
record := make(map[string]interface{})
applied, iter, err := d.session.MapExecuteBatchCAS(batch, record)
if err != nil {
return gocql.ConvertError("ConflictResolveWorkflowExecution", err)
}
defer func() {
_ = iter.Close()
}()
if !applied {
executionCASConditions := []executionCASCondition{{
runID: resetWorkflow.RunID,
// dbVersion is for CAS, so the db record version will be set to `resetWorkflow.DBRecordVersion`
// while CAS on `resetWorkflow.DBRecordVersion - 1`
dbVersion: resetWorkflow.DBRecordVersion - 1,
nextEventID: resetWorkflow.Condition,
}}
if currentWorkflow != nil {
executionCASConditions = append(executionCASConditions, executionCASCondition{
runID: currentWorkflow.RunID,
// dbVersion is for CAS, so the db record version will be set to `currentWorkflow.DBRecordVersion`
// while CAS on `currentWorkflow.DBRecordVersion - 1`
dbVersion: currentWorkflow.DBRecordVersion - 1,
nextEventID: currentWorkflow.Condition,
})
}
return convertErrors(
record,
iter,
request.ShardID,
request.RangeID,
currentRunID,
executionCASConditions,
)
}
return nil
}
func (d *cassandraPersistence) assertNotCurrentExecution(
shardID int32,
namespaceID string,
workflowID string,
runID string,
) error {
if resp, err := d.GetCurrentExecution(&p.GetCurrentExecutionRequest{
ShardID: shardID,
NamespaceID: namespaceID,
WorkflowID: workflowID,
}); err != nil {
if _, ok := err.(*serviceerror.NotFound); ok {
// no current record to assert against, so allow the bypass
return nil
}
return err
} else if resp.RunID == runID {
return &p.ConditionFailedError{
Msg: fmt.Sprintf("Assertion on current record failed. Current run ID is not expected: %v", resp.RunID),
}
}
return nil
}
func (d *cassandraPersistence) DeleteWorkflowExecution(
request *p.DeleteWorkflowExecutionRequest,
) error {
query := d.session.Query(templateDeleteWorkflowExecutionMutableStateQuery,
request.ShardID,
rowTypeExecution,
request.NamespaceID,
request.WorkflowID,
request.RunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID)
err := query.Exec()
return gocql.ConvertError("DeleteWorkflowExecution", err)
}
func (d *cassandraPersistence) DeleteCurrentWorkflowExecution(
request *p.DeleteCurrentWorkflowExecutionRequest,
) error {
query := d.session.Query(templateDeleteWorkflowExecutionCurrentRowQuery,
request.ShardID,
rowTypeExecution,
request.NamespaceID,
request.WorkflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
request.RunID)
err := query.Exec()
return gocql.ConvertError("DeleteWorkflowCurrentRow", err)
}
func (d *cassandraPersistence) GetCurrentExecution(
request *p.GetCurrentExecutionRequest,
) (*p.InternalGetCurrentExecutionResponse, error) {
query := d.session.Query(templateGetCurrentExecutionQuery,
request.ShardID,
rowTypeExecution,
request.NamespaceID,
request.WorkflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID)
result := make(map[string]interface{})
if err := query.MapScan(result); err != nil {
return nil, gocql.ConvertError("GetCurrentExecution", err)
}
currentRunID := gocql.UUIDToString(result["current_run_id"])
lastWriteVersion := result["workflow_last_write_version"].(int64)
executionStateBlob, err := executionStateBlobFromRow(result)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("GetCurrentExecution operation failed. Error: %v", err))
}
// TODO: fix this - ExecutionState should not be stored as a blob.
executionState, err := serialization.WorkflowExecutionStateFromBlob(executionStateBlob.Data, executionStateBlob.EncodingType.String())
if err != nil {
return nil, err
}
return &p.InternalGetCurrentExecutionResponse{
RunID: currentRunID,
ExecutionState: executionState,
LastWriteVersion: lastWriteVersion,
}, nil
}
func (d *cassandraPersistence) ListConcreteExecutions(
request *p.ListConcreteExecutionsRequest,
) (*p.InternalListConcreteExecutionsResponse, error) {
query := d.session.Query(
templateListWorkflowExecutionQuery,
request.ShardID,
rowTypeExecution,
)
iter := query.PageSize(request.PageSize).PageState(request.PageToken).Iter()
response := &p.InternalListConcreteExecutionsResponse{}
result := make(map[string]interface{})
for iter.MapScan(result) {
runID := gocql.UUIDToString(result["run_id"])
if runID == permanentRunID {
result = make(map[string]interface{})
continue
}
if _, ok := result["execution"]; ok {
state, err := mutableStateFromRow(result)
if err != nil {
return nil, err
}
response.States = append(response.States, state)
}
result = make(map[string]interface{})
}
nextPageToken := iter.PageState()
response.NextPageToken = make([]byte, len(nextPageToken))
copy(response.NextPageToken, nextPageToken)
return response, nil
}
func (d *cassandraPersistence) AddTasks(
request *p.AddTasksRequest,
) error {
batch := d.session.NewBatch(gocql.LoggedBatch)
if err := applyTasks(
batch,
request.ShardID,
request.NamespaceID,
request.WorkflowID,
request.RunID,
request.TransferTasks,
request.TimerTasks,
request.ReplicationTasks,
request.VisibilityTasks,
); err != nil {
return err
}
batch.Query(templateUpdateLeaseQuery,
request.RangeID,
request.ShardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.RangeID,
)
previous := make(map[string]interface{})
applied, iter, err := d.session.MapExecuteBatchCAS(batch, previous)
if err != nil {
return gocql.ConvertError("AddTasks", err)
}
defer func() {
_ = iter.Close()
}()
if !applied {
if previousRangeID, ok := previous["range_id"].(int64); ok && previousRangeID != request.RangeID {
// AddTasks failed because rangeID was modified
return &p.ShardOwnershipLostError{
ShardID: request.ShardID,
Msg: fmt.Sprintf("Failed to add tasks. Request RangeID: %v, Actual RangeID: %v", request.RangeID, previousRangeID),
}
} else {
return serviceerror.NewInternal("AddTasks operation failed: %v")
}
}
return nil
}
func (d *cassandraPersistence) GetTransferTask(
request *p.GetTransferTaskRequest,
) (*p.GetTransferTaskResponse, error) {
shardID := request.ShardID
taskID := request.TaskID
query := d.session.Query(templateGetTransferTaskQuery,
shardID,
rowTypeTransferTask,
rowTypeTransferNamespaceID,
rowTypeTransferWorkflowID,
rowTypeTransferRunID,
defaultVisibilityTimestamp,
taskID)
var data []byte
var encoding string
if err := query.Scan(&data, &encoding); err != nil {
return nil, gocql.ConvertError("GetTransferTask", err)
}
info, err := serialization.TransferTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, gocql.ConvertError("GetTransferTask", err)
}
return &p.GetTransferTaskResponse{TransferTaskInfo: info}, nil
}
func (d *cassandraPersistence) GetTransferTasks(
request *p.GetTransferTasksRequest,
) (*p.GetTransferTasksResponse, error) {
// Reading transfer tasks needs to be quorum-level consistent, otherwise we could lose tasks
query := d.session.Query(templateGetTransferTasksQuery,
request.ShardID,
rowTypeTransferTask,
rowTypeTransferNamespaceID,
rowTypeTransferWorkflowID,
rowTypeTransferRunID,
defaultVisibilityTimestamp,
request.ReadLevel,
request.MaxReadLevel,
)
iter := query.PageSize(request.BatchSize).PageState(request.NextPageToken).Iter()
response := &p.GetTransferTasksResponse{}
var data []byte
var encoding string
for iter.Scan(&data, &encoding) {
t, err := serialization.TransferTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, gocql.ConvertError("GetTransferTasks", err)
}
response.Tasks = append(response.Tasks, t)
}
nextPageToken := iter.PageState()
response.NextPageToken = make([]byte, len(nextPageToken))
copy(response.NextPageToken, nextPageToken)
if err := iter.Close(); err != nil {
return nil, gocql.ConvertError("GetTransferTasks", err)
}
return response, nil
}
func (d *cassandraPersistence) GetVisibilityTask(
request *p.GetVisibilityTaskRequest,
) (*p.GetVisibilityTaskResponse, error) {
shardID := request.ShardID
taskID := request.TaskID
query := d.session.Query(templateGetVisibilityTaskQuery,
shardID,
rowTypeVisibilityTask,
rowTypeVisibilityTaskNamespaceID,
rowTypeVisibilityTaskWorkflowID,
rowTypeVisibilityTaskRunID,
defaultVisibilityTimestamp,
taskID)
var data []byte
var encoding string
if err := query.Scan(&data, &encoding); err != nil {
return nil, gocql.ConvertError("GetVisibilityTask", err)
}
info, err := serialization.VisibilityTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, gocql.ConvertError("GetVisibilityTask", err)
}
return &p.GetVisibilityTaskResponse{VisibilityTaskInfo: info}, nil
}
func (d *cassandraPersistence) GetVisibilityTasks(
request *p.GetVisibilityTasksRequest,
) (*p.GetVisibilityTasksResponse, error) {
// Reading visibility tasks needs to be quorum-level consistent, otherwise we could lose tasks
query := d.session.Query(templateGetVisibilityTasksQuery,
request.ShardID,
rowTypeVisibilityTask,
rowTypeVisibilityTaskNamespaceID,
rowTypeVisibilityTaskWorkflowID,
rowTypeVisibilityTaskRunID,
defaultVisibilityTimestamp,
request.ReadLevel,
request.MaxReadLevel,
)
iter := query.PageSize(request.BatchSize).PageState(request.NextPageToken).Iter()
response := &p.GetVisibilityTasksResponse{}
var data []byte
var encoding string
for iter.Scan(&data, &encoding) {
t, err := serialization.VisibilityTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, gocql.ConvertError("GetVisibilityTasks", err)
}
response.Tasks = append(response.Tasks, t)
}
nextPageToken := iter.PageState()
response.NextPageToken = make([]byte, len(nextPageToken))
copy(response.NextPageToken, nextPageToken)
if err := iter.Close(); err != nil {
return nil, gocql.ConvertError("GetVisibilityTasks", err)
}
return response, nil
}
func (d *cassandraPersistence) GetReplicationTask(
request *p.GetReplicationTaskRequest,
) (*p.GetReplicationTaskResponse, error) {
shardID := request.ShardID
taskID := request.TaskID
query := d.session.Query(templateGetReplicationTaskQuery,
shardID,
rowTypeReplicationTask,
rowTypeReplicationNamespaceID,
rowTypeReplicationWorkflowID,
rowTypeReplicationRunID,
defaultVisibilityTimestamp,
taskID)
var data []byte
var encoding string
if err := query.Scan(&data, &encoding); err != nil {
return nil, gocql.ConvertError("GetReplicationTask", err)
}
info, err := serialization.ReplicationTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, gocql.ConvertError("GetReplicationTask", err)
}
return &p.GetReplicationTaskResponse{ReplicationTaskInfo: info}, nil
}
func (d *cassandraPersistence) GetReplicationTasks(
request *p.GetReplicationTasksRequest,
) (*p.GetReplicationTasksResponse, error) {
// Reading replication tasks needs to be quorum-level consistent, otherwise we could lose tasks
query := d.session.Query(templateGetReplicationTasksQuery,
request.ShardID,
rowTypeReplicationTask,
rowTypeReplicationNamespaceID,
rowTypeReplicationWorkflowID,
rowTypeReplicationRunID,
defaultVisibilityTimestamp,
request.MinTaskID,
request.MaxTaskID,
).PageSize(request.BatchSize).PageState(request.NextPageToken)
return d.populateGetReplicationTasksResponse(query, "GetReplicationTasks")
}
func (d *cassandraPersistence) populateGetReplicationTasksResponse(
query gocql.Query,
operation string,
) (*p.GetReplicationTasksResponse, error) {
iter := query.Iter()
response := &p.GetReplicationTasksResponse{}
var data []byte
var encoding string
for iter.Scan(&data, &encoding) {
t, err := serialization.ReplicationTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, gocql.ConvertError(operation, err)
}
response.Tasks = append(response.Tasks, t)
}
nextPageToken := iter.PageState()
response.NextPageToken = make([]byte, len(nextPageToken))
copy(response.NextPageToken, nextPageToken)
if err := iter.Close(); err != nil {
return nil, gocql.ConvertError(operation, err)
}
return response, nil
}
func (d *cassandraPersistence) CompleteTransferTask(
request *p.CompleteTransferTaskRequest,
) error {
query := d.session.Query(templateCompleteTransferTaskQuery,
request.ShardID,
rowTypeTransferTask,
rowTypeTransferNamespaceID,
rowTypeTransferWorkflowID,
rowTypeTransferRunID,
defaultVisibilityTimestamp,
request.TaskID)
err := query.Exec()
return gocql.ConvertError("CompleteTransferTask", err)
}
func (d *cassandraPersistence) RangeCompleteTransferTask(
request *p.RangeCompleteTransferTaskRequest,
) error {
query := d.session.Query(templateRangeCompleteTransferTaskQuery,
request.ShardID,
rowTypeTransferTask,
rowTypeTransferNamespaceID,
rowTypeTransferWorkflowID,
rowTypeTransferRunID,
defaultVisibilityTimestamp,
request.ExclusiveBeginTaskID,
request.InclusiveEndTaskID,
)
err := query.Exec()
return gocql.ConvertError("RangeCompleteTransferTask", err)
}
func (d *cassandraPersistence) CompleteVisibilityTask(
request *p.CompleteVisibilityTaskRequest,
) error {
query := d.session.Query(templateCompleteVisibilityTaskQuery,
request.ShardID,
rowTypeVisibilityTask,
rowTypeVisibilityTaskNamespaceID,
rowTypeVisibilityTaskWorkflowID,
rowTypeVisibilityTaskRunID,
defaultVisibilityTimestamp,
request.TaskID)
err := query.Exec()
return gocql.ConvertError("CompleteVisibilityTask", err)
}
func (d *cassandraPersistence) RangeCompleteVisibilityTask(
request *p.RangeCompleteVisibilityTaskRequest,
) error {
query := d.session.Query(templateRangeCompleteVisibilityTaskQuery,
request.ShardID,
rowTypeVisibilityTask,
rowTypeVisibilityTaskNamespaceID,
rowTypeVisibilityTaskWorkflowID,
rowTypeVisibilityTaskRunID,
defaultVisibilityTimestamp,
request.ExclusiveBeginTaskID,
request.InclusiveEndTaskID,
)
err := query.Exec()
return gocql.ConvertError("RangeCompleteVisibilityTask", err)
}
func (d *cassandraPersistence) CompleteReplicationTask(
request *p.CompleteReplicationTaskRequest,
) error {
query := d.session.Query(templateCompleteReplicationTaskQuery,
request.ShardID,
rowTypeReplicationTask,
rowTypeReplicationNamespaceID,
rowTypeReplicationWorkflowID,
rowTypeReplicationRunID,
defaultVisibilityTimestamp,
request.TaskID)
err := query.Exec()
return gocql.ConvertError("CompleteReplicationTask", err)
}
func (d *cassandraPersistence) RangeCompleteReplicationTask(
request *p.RangeCompleteReplicationTaskRequest,
) error {
query := d.session.Query(templateCompleteReplicationTaskBeforeQuery,
request.ShardID,
rowTypeReplicationTask,
rowTypeReplicationNamespaceID,
rowTypeReplicationWorkflowID,
rowTypeReplicationRunID,
defaultVisibilityTimestamp,
request.InclusiveEndTaskID,
)
err := query.Exec()
return gocql.ConvertError("RangeCompleteReplicationTask", err)
}
func (d *cassandraPersistence) CompleteTimerTask(
request *p.CompleteTimerTaskRequest,
) error {
ts := p.UnixMilliseconds(request.VisibilityTimestamp)
query := d.session.Query(templateCompleteTimerTaskQuery,
request.ShardID,
rowTypeTimerTask,
rowTypeTimerNamespaceID,
rowTypeTimerWorkflowID,
rowTypeTimerRunID,
ts,
request.TaskID)
err := query.Exec()
return gocql.ConvertError("CompleteTimerTask", err)
}
func (d *cassandraPersistence) RangeCompleteTimerTask(
request *p.RangeCompleteTimerTaskRequest,
) error {
start := p.UnixMilliseconds(request.InclusiveBeginTimestamp)
end := p.UnixMilliseconds(request.ExclusiveEndTimestamp)
query := d.session.Query(templateRangeCompleteTimerTaskQuery,
request.ShardID,
rowTypeTimerTask,
rowTypeTimerNamespaceID,
rowTypeTimerWorkflowID,
rowTypeTimerRunID,
start,
end,
)
err := query.Exec()
return gocql.ConvertError("RangeCompleteTimerTask", err)
}
func (d *cassandraPersistence) CreateTaskQueue(
request *p.InternalCreateTaskQueueRequest,
) error {
query := d.session.Query(templateInsertTaskQueueQuery,
request.NamespaceID,
request.TaskQueue,
request.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
request.RangeID,
request.TaskQueueInfo.Data,
request.TaskQueueInfo.EncodingType.String(),
)
previous := make(map[string]interface{})
applied, err := query.MapScanCAS(previous)
if err != nil {
return gocql.ConvertError("LeaseTaskQueue", err)
}
if !applied {
previousRangeID := previous["range_id"]
return &p.ConditionFailedError{
Msg: fmt.Sprintf("CreateTaskQueue: TaskQueue:%v, TaskQueueType:%v, PreviousRangeID:%v",
request.TaskQueue, request.TaskType, previousRangeID),
}
}
return nil
}
func (d *cassandraPersistence) GetTaskQueue(
request *p.InternalGetTaskQueueRequest,
) (*p.InternalGetTaskQueueResponse, error) {
query := d.session.Query(templateGetTaskQueue,
request.NamespaceID,
request.TaskQueue,
request.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
)
var rangeID int64
var tlBytes []byte
var tlEncoding string
if err := query.Scan(&rangeID, &tlBytes, &tlEncoding); err != nil {
return nil, gocql.ConvertError("GetTaskQueue", err)
}
return &p.InternalGetTaskQueueResponse{
RangeID: rangeID,
TaskQueueInfo: p.NewDataBlob(tlBytes, tlEncoding),
}, nil
}
func (d *cassandraPersistence) ExtendLease(
request *p.InternalExtendLeaseRequest,
) error {
query := d.session.Query(templateUpdateTaskQueueQuery,
request.RangeID+1,
request.TaskQueueInfo.Data,
request.TaskQueueInfo.EncodingType.String(),
request.NamespaceID,
&request.TaskQueue,
request.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
request.RangeID,
)
previous := make(map[string]interface{})
applied, err := query.MapScanCAS(previous)
if err != nil {
return gocql.ConvertError("LeaseTaskQueue", err)
}
if !applied {
previousRangeID := previous["range_id"]
return &p.ConditionFailedError{
Msg: fmt.Sprintf("ExtendLease: taskQueue:%v, taskQueueType:%v, haveRangeID:%v, gotRangeID:%v",
request.TaskQueue, request.TaskType, request.RangeID, previousRangeID),
}
}
return nil
}
// UpdateTaskQueue update task queue
func (d *cassandraPersistence) UpdateTaskQueue(
request *p.InternalUpdateTaskQueueRequest,
) (*p.UpdateTaskQueueResponse, error) {
var err error
var applied bool
previous := make(map[string]interface{})
if request.TaskQueueKind == enumspb.TASK_QUEUE_KIND_STICKY { // if task_queue is sticky, then update with TTL
if request.ExpiryTime == nil {
return nil, serviceerror.NewInternal("ExpiryTime cannot be nil for sticky task queue")
}
expiryTtl := convert.Int64Ceil(time.Until(timestamp.TimeValue(request.ExpiryTime)).Seconds())
batch := d.session.NewBatch(gocql.LoggedBatch)
batch.Query(templateUpdateTaskQueueQueryWithTTLPart1,
request.NamespaceID,
request.TaskQueue,
request.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
expiryTtl,
)
batch.Query(templateUpdateTaskQueueQueryWithTTLPart2,
expiryTtl,
request.RangeID,
request.TaskQueueInfo.Data,
request.TaskQueueInfo.EncodingType.String(),
request.NamespaceID,
request.TaskQueue,
request.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
request.RangeID,
)
applied, _, err = d.session.MapExecuteBatchCAS(batch, previous)
} else {
query := d.session.Query(templateUpdateTaskQueueQuery,
request.RangeID,
request.TaskQueueInfo.Data,
request.TaskQueueInfo.EncodingType.String(),
request.NamespaceID,
request.TaskQueue,
request.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
request.RangeID,
)
applied, err = query.MapScanCAS(previous)
}
if err != nil {
return nil, gocql.ConvertError("UpdateTaskQueue", err)
}
if !applied {
var columns []string
for k, v := range previous {
columns = append(columns, fmt.Sprintf("%s=%v", k, v))
}
return nil, &p.ConditionFailedError{
Msg: fmt.Sprintf("Failed to update task queue. name: %v, type: %v, rangeID: %v, columns: (%v)",
request.TaskQueue, request.TaskType, request.RangeID, strings.Join(columns, ",")),
}
}
return &p.UpdateTaskQueueResponse{}, nil
}
func (d *cassandraPersistence) ListTaskQueue(
_ *p.ListTaskQueueRequest,
) (*p.InternalListTaskQueueResponse, error) {
return nil, serviceerror.NewInternal("unsupported operation")
}
func (d *cassandraPersistence) DeleteTaskQueue(
request *p.DeleteTaskQueueRequest,
) error {
query := d.session.Query(templateDeleteTaskQueueQuery,
request.TaskQueue.NamespaceID, request.TaskQueue.Name, request.TaskQueue.TaskType, rowTypeTaskQueue, taskQueueTaskID, request.RangeID)
previous := make(map[string]interface{})
applied, err := query.MapScanCAS(previous)
if err != nil {
return gocql.ConvertError("DeleteTaskQueue", err)
}
if !applied {
return &p.ConditionFailedError{
Msg: fmt.Sprintf("DeleteTaskQueue operation failed: expected_range_id=%v but found %+v", request.RangeID, previous),
}
}
return nil
}
// CreateTasks add tasks
func (d *cassandraPersistence) CreateTasks(
request *p.InternalCreateTasksRequest,
) (*p.CreateTasksResponse, error) {
batch := d.session.NewBatch(gocql.LoggedBatch)
namespaceID := request.NamespaceID
taskQueue := request.TaskQueue
taskQueueType := request.TaskType
for _, task := range request.Tasks {
ttl := GetTaskTTL(task.ExpiryTime)
if ttl <= 0 || ttl > maxCassandraTTL {
batch.Query(templateCreateTaskQuery,
namespaceID,
taskQueue,
taskQueueType,
rowTypeTask,
task.TaskId,
task.Task.Data,
task.Task.EncodingType.String())
} else {
batch.Query(templateCreateTaskWithTTLQuery,
namespaceID,
taskQueue,
taskQueueType,
rowTypeTask,
task.TaskId,
task.Task.Data,
task.Task.EncodingType.String(),
ttl)
}
}
// The following query is used to ensure that range_id didn't change
batch.Query(templateUpdateTaskQueueQuery,
request.RangeID,
request.TaskQueueInfo.Data,
request.TaskQueueInfo.EncodingType.String(),
namespaceID,
taskQueue,
taskQueueType,
rowTypeTaskQueue,
taskQueueTaskID,
request.RangeID,
)
previous := make(map[string]interface{})
applied, _, err := d.session.MapExecuteBatchCAS(batch, previous)
if err != nil {
return nil, gocql.ConvertError("CreateTasks", err)
}
if !applied {
rangeID := previous["range_id"]
return nil, &p.ConditionFailedError{
Msg: fmt.Sprintf("Failed to create task. TaskQueue: %v, taskQueueType: %v, rangeID: %v, db rangeID: %v",
taskQueue, taskQueueType, request.RangeID, rangeID),
}
}
return &p.CreateTasksResponse{}, nil
}
func GetTaskTTL(expireTime *time.Time) int64 {
var ttl int64 = 0
if expireTime != nil {
expiryTtl := convert.Int64Ceil(time.Until(timestamp.TimeValue(expireTime)).Seconds())
// 0 means no TTL, we don't want that.
// TODO: Come back and correctly ignore expired in-memory tasks before persisting
if expiryTtl < 1 {
expiryTtl = 1
}
ttl = expiryTtl
}
return ttl
}
// GetTasks get a task
func (d *cassandraPersistence) GetTasks(
request *p.GetTasksRequest,
) (*p.InternalGetTasksResponse, error) {
if request.MaxReadLevel == nil {
return nil, serviceerror.NewInternal("getTasks: both readLevel and maxReadLevel MUST be specified for cassandra persistence")
}
if request.ReadLevel > *request.MaxReadLevel {
return &p.InternalGetTasksResponse{}, nil
}
// Reading taskqueue tasks needs to be quorum-level consistent, otherwise we could lose tasks
query := d.session.Query(templateGetTasksQuery,
request.NamespaceID,
request.TaskQueue,
request.TaskType,
rowTypeTask,
request.ReadLevel,
*request.MaxReadLevel,
)
iter := query.PageSize(request.BatchSize).Iter()
response := &p.InternalGetTasksResponse{}
task := make(map[string]interface{})
PopulateTasks:
for iter.MapScan(task) {
_, ok := task["task_id"]
if !ok { // no tasks, but static column record returned
continue
}
rawTask, ok := task["task"]
if !ok {
return nil, newFieldNotFoundError("task", task)
}
taskVal, ok := rawTask.([]byte)
if !ok {
var byteSliceType []byte
return nil, newPersistedTypeMismatchError("task", byteSliceType, rawTask, task)
}
rawEncoding, ok := task["task_encoding"]
if !ok {
return nil, newFieldNotFoundError("task_encoding", task)
}
encodingVal, ok := rawEncoding.(string)
if !ok {
var byteSliceType []byte
return nil, newPersistedTypeMismatchError("task_encoding", byteSliceType, rawEncoding, task)
}
response.Tasks = append(response.Tasks, p.NewDataBlob(taskVal, encodingVal))
if len(response.Tasks) == request.BatchSize {
break PopulateTasks
}
task = make(map[string]interface{}) // Reinitialize the map, as scanning into an already-populated map fails on unmarshalling
}
if err := iter.Close(); err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("GetTasks operation failed. Error: %v", err))
}
return response, nil
}
// CompleteTask delete a task
func (d *cassandraPersistence) CompleteTask(
request *p.CompleteTaskRequest,
) error {
tli := request.TaskQueue
query := d.session.Query(templateCompleteTaskQuery,
tli.NamespaceID,
tli.Name,
tli.TaskType,
rowTypeTask,
request.TaskID)
err := query.Exec()
if err != nil {
return gocql.ConvertError("CompleteTask", err)
}
return nil
}
// CompleteTasksLessThan deletes all tasks less than or equal to the given task id. This API ignores the
// Limit request parameter, i.e. either all tasks <= the task_id will be deleted or an error will
// be returned to the caller
func (d *cassandraPersistence) CompleteTasksLessThan(
request *p.CompleteTasksLessThanRequest,
) (int, error) {
query := d.session.Query(templateCompleteTasksLessThanQuery,
request.NamespaceID, request.TaskQueueName, request.TaskType, rowTypeTask, request.TaskID)
err := query.Exec()
if err != nil {
return 0, gocql.ConvertError("CompleteTasksLessThan", err)
}
return p.UnknownNumRowsAffected, nil
}
func (d *cassandraPersistence) GetTimerTask(
request *p.GetTimerTaskRequest,
) (*p.GetTimerTaskResponse, error) {
shardID := request.ShardID
taskID := request.TaskID
visibilityTs := request.VisibilityTimestamp
query := d.session.Query(templateGetTimerTaskQuery,
shardID,
rowTypeTimerTask,
rowTypeTimerNamespaceID,
rowTypeTimerWorkflowID,
rowTypeTimerRunID,
visibilityTs,
taskID)
var data []byte
var encoding string
if err := query.Scan(&data, &encoding); err != nil {
return nil, gocql.ConvertError("GetTimerTask", err)
}
info, err := serialization.TimerTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, gocql.ConvertError("GetTimerTask", err)
}
return &p.GetTimerTaskResponse{TimerTaskInfo: info}, nil
}
func (d *cassandraPersistence) GetTimerIndexTasks(
request *p.GetTimerIndexTasksRequest,
) (*p.GetTimerIndexTasksResponse, error) {
// Reading timer tasks needs to be quorum-level consistent, otherwise we could lose tasks
minTimestamp := p.UnixMilliseconds(request.MinTimestamp)
maxTimestamp := p.UnixMilliseconds(request.MaxTimestamp)
query := d.session.Query(templateGetTimerTasksQuery,
request.ShardID,
rowTypeTimerTask,
rowTypeTimerNamespaceID,
rowTypeTimerWorkflowID,
rowTypeTimerRunID,
minTimestamp,
maxTimestamp,
)
iter := query.PageSize(request.BatchSize).PageState(request.NextPageToken).Iter()
response := &p.GetTimerIndexTasksResponse{}
var data []byte
var encoding string
for iter.Scan(&data, &encoding) {
t, err := serialization.TimerTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, gocql.ConvertError("GetTimerIndexTasks", err)
}
response.Timers = append(response.Timers, t)
}
nextPageToken := iter.PageState()
response.NextPageToken = make([]byte, len(nextPageToken))
copy(response.NextPageToken, nextPageToken)
if err := iter.Close(); err != nil {
return nil, gocql.ConvertError("GetTimerIndexTasks", err)
}
return response, nil
}
func (d *cassandraPersistence) PutReplicationTaskToDLQ(
request *p.PutReplicationTaskToDLQRequest,
) error {
task := request.TaskInfo
datablob, err := serialization.ReplicationTaskInfoToBlob(task)
if err != nil {
return gocql.ConvertError("PutReplicationTaskToDLQ", err)
}
// Use source cluster name as the workflow id for replication dlq
query := d.session.Query(templateCreateReplicationTaskQuery,
request.ShardID,
rowTypeDLQ,
rowTypeDLQNamespaceID,
request.SourceClusterName,
rowTypeDLQRunID,
datablob.Data,
datablob.EncodingType.String(),
defaultVisibilityTimestamp,
task.GetTaskId())
err = query.Exec()
if err != nil {
return gocql.ConvertError("PutReplicationTaskToDLQ", err)
}
return nil
}
func (d *cassandraPersistence) GetReplicationTasksFromDLQ(
request *p.GetReplicationTasksFromDLQRequest,
) (*p.GetReplicationTasksFromDLQResponse, error) {
// Reading replication tasks needs to be quorum-level consistent, otherwise we could lose tasks
query := d.session.Query(templateGetReplicationTasksQuery,
request.ShardID,
rowTypeDLQ,
rowTypeDLQNamespaceID,
request.SourceClusterName,
rowTypeDLQRunID,
defaultVisibilityTimestamp,
request.MinTaskID,
request.MinTaskID+int64(request.BatchSize),
).PageSize(request.BatchSize).PageState(request.NextPageToken)
return d.populateGetReplicationTasksResponse(query, "GetReplicationTasksFromDLQ")
}
func (d *cassandraPersistence) DeleteReplicationTaskFromDLQ(
request *p.DeleteReplicationTaskFromDLQRequest,
) error {
query := d.session.Query(templateCompleteReplicationTaskQuery,
request.ShardID,
rowTypeDLQ,
rowTypeDLQNamespaceID,
request.SourceClusterName,
rowTypeDLQRunID,
defaultVisibilityTimestamp,
request.TaskID,
)
err := query.Exec()
return gocql.ConvertError("DeleteReplicationTaskFromDLQ", err)
}
func (d *cassandraPersistence) RangeDeleteReplicationTaskFromDLQ(
request *p.RangeDeleteReplicationTaskFromDLQRequest,
) error {
query := d.session.Query(templateRangeCompleteReplicationTaskQuery,
request.ShardID,
rowTypeDLQ,
rowTypeDLQNamespaceID,
request.SourceClusterName,
rowTypeDLQRunID,
defaultVisibilityTimestamp,
request.ExclusiveBeginTaskID,
request.InclusiveEndTaskID,
)
err := query.Exec()
return gocql.ConvertError("RangeDeleteReplicationTaskFromDLQ", err)
}
func mutableStateFromRow(
result map[string]interface{},
) (*p.InternalWorkflowMutableState, error) {
eiBytes, ok := result["execution"].([]byte)
if !ok {
return nil, newPersistedTypeMismatchError("execution", "", eiBytes, result)
}
eiEncoding, ok := result["execution_encoding"].(string)
if !ok {
return nil, newPersistedTypeMismatchError("execution_encoding", "", eiEncoding, result)
}
nextEventID, ok := result["next_event_id"].(int64)
if !ok {
return nil, newPersistedTypeMismatchError("next_event_id", "", nextEventID, result)
}
protoState, err := executionStateBlobFromRow(result)
if err != nil {
return nil, err
}
mutableState := &p.InternalWorkflowMutableState{
ExecutionInfo: p.NewDataBlob(eiBytes, eiEncoding),
ExecutionState: protoState,
NextEventID: nextEventID,
}
return mutableState, nil
}
func executionStateBlobFromRow(
result map[string]interface{},
) (*commonpb.DataBlob, error) {
state, ok := result["execution_state"].([]byte)
if !ok {
return nil, newPersistedTypeMismatchError("execution_state", "", state, result)
}
stateEncoding, ok := result["execution_state_encoding"].(string)
if !ok {
return nil, newPersistedTypeMismatchError("execution_state_encoding", "", stateEncoding, result)
}
return p.NewDataBlob(state, stateEncoding), nil
}
| 1 | 12,461 | Looks like each AppendHistoryNodes is its own transaction - what happens if `len(request.NewWorkflowNewEvents) == 5` but we only succeed in writing 3 of them? Will the system be able to pick up from there later? I'm _guessing_ the answer is related to the `ON CONFLICT` and `INSERT IGNORE` changes below? What does Cassandra do? | temporalio-temporal | go |
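One piece of context for the reviewer's question above: in Cassandra a plain INSERT (without IF NOT EXISTS) is an upsert, so re-writing a history-node row with the same primary key simply overwrites it, and a caller that persisted only 3 of 5 events can retry the whole append later without special conflict handling. The sketch below is illustrative only: it uses plain gocql, a hypothetical history_node table and statement, and is not the project's actual AppendHistoryNodes code (which is not shown in this row).

package historysketch

import (
	"github.com/gocql/gocql"
)

// historyNodeInsert is a hypothetical statement; the real schema and statements live in the project.
const historyNodeInsert = `INSERT INTO history_node (tree_id, branch_id, node_id, data, data_encoding) VALUES (?, ?, ?, ?, ?)`

// appendEvents writes each serialized event as its own row. Because a Cassandra INSERT is an
// upsert keyed here by (tree_id, branch_id, node_id), retrying after a partial failure simply
// rewrites the rows that already made it and fills in the missing ones.
func appendEvents(session *gocql.Session, treeID, branchID string, firstNodeID int64, events [][]byte) error {
	for i, data := range events {
		nodeID := firstNodeID + int64(i)
		if err := session.Query(historyNodeInsert, treeID, branchID, nodeID, data, "proto3").Exec(); err != nil {
			// Stop at the first failure; the whole call can be retried safely.
			return err
		}
	}
	return nil
}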
@@ -107,7 +107,12 @@ func (s *SVIDStoreService) deleteSVID(ctx context.Context, log logrus.FieldLogge
return false
}
- log = log.WithField(telemetry.SVIDStore, storeName)
+ log = log.WithFields(logrus.Fields{
+ telemetry.SVIDStore: storeName,
+ telemetry.Entry: entry.EntryId,
+ telemetry.SPIFFEID: entry.SpiffeId,
+ })
+
svidStore, ok := s.cat.GetSVIDStoreNamed(storeName)
if !ok {
log.Error("Error deleting SVID: SVIDStore not found") | 1 | package store
import (
"context"
"crypto/x509"
"errors"
"fmt"
"time"
"github.com/andres-erbsen/clock"
"github.com/sirupsen/logrus"
"github.com/spiffe/go-spiffe/v2/spiffeid"
"github.com/spiffe/spire/pkg/agent/catalog"
"github.com/spiffe/spire/pkg/agent/manager/storecache"
"github.com/spiffe/spire/pkg/agent/plugin/svidstore"
"github.com/spiffe/spire/pkg/common/telemetry"
telemetry_store "github.com/spiffe/spire/pkg/common/telemetry/agent/store"
"github.com/spiffe/spire/pkg/common/util"
"github.com/spiffe/spire/proto/spire/common"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const (
defaultInterval = 5 * time.Second
)
type Cache interface {
// ReadyToStore returns the list of store cache records that are ready to be stored in a specific SVID Store
ReadyToStore() []*storecache.Record
// HandledRecord sets the given revision on the cached record for the entry
HandledRecord(entry *common.RegistrationEntry, revision int64)
}
type Config struct {
Clk clock.Clock
Log logrus.FieldLogger
TrustDomain spiffeid.TrustDomain
Cache Cache
Catalog catalog.Catalog
Metrics telemetry.Metrics
}
type SVIDStoreService struct {
clk clock.Clock
log logrus.FieldLogger
// trustDomain is the trust domain of the agent
trustDomain spiffeid.TrustDomain
// cache is the store cache
cache Cache
cat catalog.Catalog
metrics telemetry.Metrics
hooks struct {
// test hook used to verify if a cycle finished
storeFinished chan struct{}
}
}
func New(c *Config) *SVIDStoreService {
clk := c.Clk
if clk == nil {
clk = clock.New()
}
return &SVIDStoreService{
cache: c.Cache,
clk: clk,
log: c.Log,
metrics: c.Metrics,
trustDomain: c.TrustDomain,
cat: c.Catalog,
}
}
// SetStoreFinishedHook used for testing only
func (s *SVIDStoreService) SetStoreFinishedHook(storeFinished chan struct{}) {
s.hooks.storeFinished = storeFinished
}
// Run starts SVID Store service
func (s *SVIDStoreService) Run(ctx context.Context) error {
timer := s.clk.Timer(defaultInterval)
defer timer.Stop()
for {
s.processRecords(ctx)
timer.Reset(defaultInterval)
select {
case <-timer.C:
case <-ctx.Done():
return nil
}
}
}
// deleteSVID deletes a stored SVID that uses the SVIDStore plugin. It gets the plugin name from entry selectors
func (s *SVIDStoreService) deleteSVID(ctx context.Context, log logrus.FieldLogger, entry *common.RegistrationEntry) bool {
log = log.WithFields(logrus.Fields{
telemetry.Entry: entry.EntryId,
telemetry.SPIFFEID: entry.SpiffeId,
})
storeName, metadata, err := getStoreNameWithMetadata(entry.Selectors)
if err != nil {
log.WithError(err).Error("Invalid store name in selectors")
return false
}
log = log.WithField(telemetry.SVIDStore, storeName)
svidStore, ok := s.cat.GetSVIDStoreNamed(storeName)
if !ok {
log.Error("Error deleting SVID: SVIDStore not found")
return false
}
err = svidStore.DeleteX509SVID(ctx, metadata)
switch status.Code(err) {
case codes.OK:
log.Debug("SVID deleted successfully")
return true
case codes.InvalidArgument:
log.WithError(err).Debug("Failed to delete SVID because of malformed selectors")
return true
default:
log.WithError(err).Error("Failed to delete SVID")
return false
}
}
// storeSVID creates or updates an SVID using the SVIDStore plugin. It gets the plugin name from entry selectors
func (s *SVIDStoreService) storeSVID(ctx context.Context, log logrus.FieldLogger, record *storecache.Record) {
if record.Svid == nil {
// Svid is not yet provided.
return
}
log = log.WithFields(logrus.Fields{
telemetry.Entry: record.Entry.EntryId,
telemetry.SPIFFEID: record.Entry.SpiffeId,
})
storeName, metadata, err := getStoreNameWithMetadata(record.Entry.Selectors)
if err != nil {
log.WithError(err).Error("Invalid store name in selectors")
return
}
log = log.WithField(telemetry.SVIDStore, storeName)
svidStore, ok := s.cat.GetSVIDStoreNamed(storeName)
if !ok {
log.Error("Error storing SVID: SVIDStore not found")
return
}
req, err := s.requestFromRecord(record, metadata)
if err != nil {
log.WithError(err).Error("Failed to parse record")
return
}
if err := svidStore.PutX509SVID(ctx, req); err != nil {
log.WithError(err).Error("Failed to put X509-SVID")
return
}
// Set revision, since SVID was updated successfully
s.cache.HandledRecord(record.Entry, record.Revision)
log.Debug("SVID stored successfully")
}
// TODO: should we change log.Error to Debug?
func (s *SVIDStoreService) processRecords(ctx context.Context) {
counter := telemetry_store.StartStoreSVIDUpdates(s.metrics)
defer counter.Done(nil)
for _, record := range s.cache.ReadyToStore() {
log := s.log.WithField(telemetry.RevisionNumber, record.Revision)
// Check if entry is marked to be deleted
if record.Entry == nil {
// TODO: add a retry backoff
if s.deleteSVID(ctx, log, record.HandledEntry) {
// Deleted successfully. update revision
s.cache.HandledRecord(record.HandledEntry, record.Revision)
}
continue
}
// Entries with changes on selectors must be removed before SVID is stored.
if record.HandledEntry != nil {
// Verify if selector changed. If it changed, delete the SVID from store before updating
if !util.EqualsSelectors(record.Entry.Selectors, record.HandledEntry.Selectors) {
// TODO: add retry, and maybe fail update until it is deleted?
s.deleteSVID(ctx, log, record.HandledEntry)
}
}
s.storeSVID(ctx, log, record)
}
if s.hooks.storeFinished != nil {
s.hooks.storeFinished <- struct{}{}
}
}
// requestFromRecord parses a cache record to a *svidstore.X509SVID
func (s *SVIDStoreService) requestFromRecord(record *storecache.Record, metadata []string) (*svidstore.X509SVID, error) {
rootCA, ok := record.Bundles[s.trustDomain]
if !ok {
return nil, errors.New("no rootCA found")
}
federatedBundles := make(map[string][]*x509.Certificate)
for _, federatedID := range record.Entry.FederatesWith {
td, err := spiffeid.TrustDomainFromString(federatedID)
if err != nil {
// This is purely defensive since federatedID should be valid
continue
}
// Do not add the agent's trust domain to the federated bundles
if td == s.trustDomain {
continue
}
bundle, ok := record.Bundles[td]
if !ok {
// Federated bundle not found, no action taken
continue
}
federatedBundles[federatedID] = bundle.RootCAs()
}
spiffeID, err := spiffeid.FromString(record.Entry.SpiffeId)
if err != nil {
return nil, fmt.Errorf("failed to parse SPIFFE ID: %w", err)
}
return &svidstore.X509SVID{
Metadata: metadata,
SVID: &svidstore.SVID{
SPIFFEID: spiffeID,
Bundle: rootCA.RootCAs(),
CertChain: record.Svid.Chain,
PrivateKey: record.Svid.PrivateKey,
ExpiresAt: record.ExpiresAt,
},
FederatedBundles: federatedBundles,
}, nil
}
// getStoreNameWithMetadata gets the SVIDStore plugin name and selector metadata from entry selectors; it fails in case an entry has no selectors or selectors of multiple types
func getStoreNameWithMetadata(selectors []*common.Selector) (string, []string, error) {
if len(selectors) == 0 {
return "", nil, errors.New("no selectors found")
}
var metadata []string
name := selectors[0].Type
for _, s := range selectors {
if name != s.Type {
return "", nil, errors.New("selector contains multiple types")
}
metadata = append(metadata, s.Value)
}
return name, metadata, nil
}
| 1 | 18,145 | why are you replacing log here? it is defined above and contains EntryID and SPIFFEID | spiffe-spire | go |
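For reference, the getStoreNameWithMetadata helper near the end of the SPIRE file above treats the type of the first selector as the SVIDStore plugin name and collects every selector value as metadata, rejecting entries whose selectors mix types. A minimal standalone sketch of that behaviour follows; it uses a simplified stand-in for SPIRE's common.Selector type, and the example selector values are made up for illustration.

package main

import (
	"errors"
	"fmt"
)

// selector is a simplified stand-in for SPIRE's common.Selector.
type selector struct {
	Type  string
	Value string
}

// storeNameWithMetadata mirrors the logic of getStoreNameWithMetadata shown above.
func storeNameWithMetadata(selectors []selector) (string, []string, error) {
	if len(selectors) == 0 {
		return "", nil, errors.New("no selectors found")
	}
	name := selectors[0].Type
	var metadata []string
	for _, s := range selectors {
		if s.Type != name {
			return "", nil, errors.New("selector contains multiple types")
		}
		metadata = append(metadata, s.Value)
	}
	return name, metadata, nil
}

func main() {
	// Hypothetical selectors for an AWS Secrets Manager style store.
	name, metadata, err := storeNameWithMetadata([]selector{
		{Type: "aws_secretsmanager", Value: "secretname:my-workload-svid"},
		{Type: "aws_secretsmanager", Value: "region:us-east-1"},
	})
	fmt.Println(name, metadata, err)
	// Prints: aws_secretsmanager [secretname:my-workload-svid region:us-east-1] <nil>
}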
@@ -34,11 +34,17 @@ type Command struct {
vpnServerFactory func(sessionManager session.Manager, serviceLocation dto_discovery.Location,
providerID identity.Identity, callback state.Callback) *openvpn.Server
- vpnServer *openvpn.Server
+ vpnServer *openvpn.Server
+ openvpnBinaryCheck func() error
}
// Start starts server - does not block
func (cmd *Command) Start() (err error) {
+ err = cmd.openvpnBinaryCheck()
+ if err != nil {
+ return err
+ }
+
providerID, err := cmd.identityLoader()
if err != nil {
return err | 1 | package server
import (
"errors"
log "github.com/cihub/seelog"
"github.com/mysterium/node/communication"
"github.com/mysterium/node/identity"
"github.com/mysterium/node/ip"
"github.com/mysterium/node/location"
"github.com/mysterium/node/nat"
"github.com/mysterium/node/openvpn"
"github.com/mysterium/node/openvpn/discovery"
"github.com/mysterium/node/openvpn/middlewares/state"
"github.com/mysterium/node/server"
dto_discovery "github.com/mysterium/node/service_discovery/dto"
"github.com/mysterium/node/session"
"time"
)
// Command represent entrypoint for Mysterium server with top level components
type Command struct {
identityLoader func() (identity.Identity, error)
createSigner identity.SignerFactory
ipResolver ip.Resolver
mysteriumClient server.Client
natService nat.NATService
locationDetector location.Detector
dialogWaiterFactory func(identity identity.Identity) communication.DialogWaiter
dialogWaiter communication.DialogWaiter
sessionManagerFactory func(serverIP string) session.Manager
vpnServerFactory func(sessionManager session.Manager, serviceLocation dto_discovery.Location,
providerID identity.Identity, callback state.Callback) *openvpn.Server
vpnServer *openvpn.Server
}
// Start starts server - does not block
func (cmd *Command) Start() (err error) {
providerID, err := cmd.identityLoader()
if err != nil {
return err
}
cmd.dialogWaiter = cmd.dialogWaiterFactory(providerID)
providerContact, err := cmd.dialogWaiter.Start()
if err != nil {
return err
}
// if for some reason we need a truly external IP, use GetPublicIP()
vpnServerIP, err := cmd.ipResolver.GetOutboundIP()
if err != nil {
return err
}
cmd.natService.Add(nat.RuleForwarding{
SourceAddress: "10.8.0.0/24",
TargetIP: vpnServerIP,
})
if err = cmd.natService.Start(); err != nil {
return err
}
serviceLocation, err := detectCountry(cmd.ipResolver, cmd.locationDetector)
if err != nil {
return err
}
proposal := discovery.NewServiceProposalWithLocation(providerID, providerContact, serviceLocation)
sessionManager := cmd.sessionManagerFactory(vpnServerIP)
dialogHandler := session.NewDialogHandler(proposal.ID, sessionManager)
if err := cmd.dialogWaiter.ServeDialogs(dialogHandler); err != nil {
return err
}
stopPinger := make(chan int)
vpnStateCallback := func(state openvpn.State) {
switch state {
case openvpn.ConnectedState:
log.Info("Open vpn service started")
case openvpn.ExitingState:
log.Info("Open vpn service exiting")
close(stopPinger)
}
}
cmd.vpnServer = cmd.vpnServerFactory(sessionManager, serviceLocation, providerID, vpnStateCallback)
if err := cmd.vpnServer.Start(); err != nil {
return err
}
signer := cmd.createSigner(providerID)
if err := cmd.mysteriumClient.RegisterProposal(proposal, signer); err != nil {
return err
}
go func() {
for {
select {
case <-time.After(1 * time.Minute):
err := cmd.mysteriumClient.PingProposal(proposal, signer)
if err != nil {
log.Error("Failed to ping proposal", err)
// do not stop server on missing ping to discovery. More on this in MYST-362 and MYST-370
}
case <-stopPinger:
log.Info("Stopping proposal pinger")
return
}
}
}()
return nil
}
func detectCountry(ipResolver ip.Resolver, locationDetector location.Detector) (dto_discovery.Location, error) {
myIP, err := ipResolver.GetPublicIP()
if err != nil {
return dto_discovery.Location{}, errors.New("IP detection failed: " + err.Error())
}
myCountry, err := locationDetector.DetectCountry(myIP)
if err != nil {
return dto_discovery.Location{}, errors.New("Country detection failed: " + err.Error())
}
log.Info("Country detected: ", myCountry)
return dto_discovery.Location{Country: myCountry}, nil
}
// Wait blocks until server is stopped
func (cmd *Command) Wait() error {
return cmd.vpnServer.Wait()
}
// Kill stops server
func (cmd *Command) Kill() error {
cmd.vpnServer.Stop()
err := cmd.dialogWaiter.Stop()
if err != nil {
return err
}
err = cmd.natService.Stop()
return err
}
| 1 | 10,635 | Why not `checkOpenvpn` as in client command? | mysteriumnetwork-node | go |
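The patch above injects an openvpnBinaryCheck func() error into Command and calls it at the start of Start(); the check's implementation is not part of this row. Below is a plausible standalone sketch of such a check, named checkOpenvpn as the reviewer suggests, using only the Go standard library; the exact behaviour of the real function is an assumption.

package main

import (
	"fmt"
	"os/exec"
)

// checkOpenvpn reports an error if the openvpn binary cannot be found on PATH.
// This is an illustrative guess at what the injected openvpnBinaryCheck might do.
func checkOpenvpn() error {
	path, err := exec.LookPath("openvpn")
	if err != nil {
		return fmt.Errorf("openvpn binary not found in PATH: %v", err)
	}
	fmt.Println("found openvpn at", path)
	return nil
}

func main() {
	if err := checkOpenvpn(); err != nil {
		fmt.Println(err)
	}
}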
@@ -1,6 +1,11 @@
/*global mocha, console */
(function() {
'use strict';
+
+ if (!mocha || !mocha.reporter || !mocha.reporter('base')) {
+ return;
+ }
+
var Base = mocha.reporter('base')._reporter;
mocha.reporter(function(runner) {
Base.call(this, runner); | 1 | /*global mocha, console */
(function() {
'use strict';
var Base = mocha.reporter('base')._reporter;
mocha.reporter(function(runner) {
Base.call(this, runner);
var passes = 0;
var failures = 0;
runner.on('pass', function(test) {
passes++;
console.log('pass: %s', test.fullTitle());
});
runner.on('fail', function(test, err) {
failures++;
console.error('fail: %s -- error: %s', test.fullTitle(), err.message);
});
runner.on('end', function() {
console.log('end: %d/%d', passes, passes + failures);
var mochaFixture = document.getElementById('mocha');
if (mochaFixture) {
var html = '<div style="color: ' + (failures ? 'red' : 'green') + '">';
html += passes + '/' + (failures + passes);
html += ' tests passed</div>';
mochaFixture.innerHTML = html;
}
});
});
})();
| 1 | 15,220 | > Note: Non-headless tests (that run on selenium-webdriver) need to wire up to mocha reporter to collect all the results and report them. (See `test-webdriver.js`). But for other tests this can be skipped. | dequelabs-axe-core | js |
@@ -241,8 +241,8 @@ class Pathoc(tcp.TCPClient):
def http_connect(self, connect_to):
self.wfile.write(
- 'CONNECT %s:%s HTTP/1.1\r\n' % tuple(connect_to) +
- '\r\n'
+ b'CONNECT %s:%d HTTP/1.1\r\n' % (connect_to[0].encode(), connect_to[1]) +
+ b'\r\n'
)
self.wfile.flush()
try: | 1 | from __future__ import print_function
import contextlib
import sys
import os
import itertools
import hashlib
from six.moves import queue
import random
import select
import time
import OpenSSL.crypto
import six
from netlib import tcp, certutils, websockets, socks
from netlib import exceptions
from netlib.http import http1
from netlib.http import http2
from netlib import basethread
from pathod import log, language
import logging
from netlib.tutils import treq
from netlib import strutils
logging.getLogger("hpack").setLevel(logging.WARNING)
def xrepr(s):
return repr(s)[1:-1]
class PathocError(Exception):
pass
class SSLInfo(object):
def __init__(self, certchain, cipher, alp):
self.certchain, self.cipher, self.alp = certchain, cipher, alp
def __str__(self):
parts = [
"Application Layer Protocol: %s" % strutils.native(self.alp, "utf8"),
"Cipher: %s, %s bit, %s" % self.cipher,
"SSL certificate chain:"
]
for n, i in enumerate(self.certchain):
parts.append(" Certificate [%s]" % n)
parts.append("\tSubject: ")
for cn in i.get_subject().get_components():
parts.append("\t\t%s=%s" % (
strutils.native(cn[0], "utf8"),
strutils.native(cn[1], "utf8"))
)
parts.append("\tIssuer: ")
for cn in i.get_issuer().get_components():
parts.append("\t\t%s=%s" % (
strutils.native(cn[0], "utf8"),
strutils.native(cn[1], "utf8"))
)
parts.extend(
[
"\tVersion: %s" % i.get_version(),
"\tValidity: %s - %s" % (
strutils.native(i.get_notBefore(), "utf8"),
strutils.native(i.get_notAfter(), "utf8")
),
"\tSerial: %s" % i.get_serial_number(),
"\tAlgorithm: %s" % strutils.native(i.get_signature_algorithm(), "utf8")
]
)
pk = i.get_pubkey()
types = {
OpenSSL.crypto.TYPE_RSA: "RSA",
OpenSSL.crypto.TYPE_DSA: "DSA"
}
t = types.get(pk.type(), "Unknown")
parts.append("\tPubkey: %s bit %s" % (pk.bits(), t))
s = certutils.SSLCert(i)
if s.altnames:
parts.append("\tSANs: %s" % " ".join(strutils.native(n, "utf8") for n in s.altnames))
return "\n".join(parts)
class WebsocketFrameReader(basethread.BaseThread):
def __init__(
self,
rfile,
logfp,
showresp,
hexdump,
ws_read_limit,
timeout
):
basethread.BaseThread.__init__(self, "WebsocketFrameReader")
self.timeout = timeout
self.ws_read_limit = ws_read_limit
self.logfp = logfp
self.showresp = showresp
self.hexdump = hexdump
self.rfile = rfile
self.terminate = queue.Queue()
self.frames_queue = queue.Queue()
self.logger = log.ConnectionLogger(
self.logfp,
self.hexdump,
False,
rfile if showresp else None,
None
)
@contextlib.contextmanager
def terminator(self):
yield
self.frames_queue.put(None)
def run(self):
starttime = time.time()
with self.terminator():
while True:
if self.ws_read_limit == 0:
return
r, _, _ = select.select([self.rfile], [], [], 0.05)
delta = time.time() - starttime
if not r and self.timeout and delta > self.timeout:
return
try:
self.terminate.get_nowait()
return
except queue.Empty:
pass
for rfile in r:
with self.logger.ctx() as log:
try:
frm = websockets.Frame.from_file(self.rfile)
except exceptions.TcpDisconnect:
return
self.frames_queue.put(frm)
log("<< %s" % frm.header.human_readable())
if self.ws_read_limit is not None:
self.ws_read_limit -= 1
starttime = time.time()
class Pathoc(tcp.TCPClient):
def __init__(
self,
address,
# SSL
ssl=None,
sni=None,
ssl_version=tcp.SSL_DEFAULT_METHOD,
ssl_options=tcp.SSL_DEFAULT_OPTIONS,
clientcert=None,
ciphers=None,
# HTTP/2
use_http2=False,
http2_skip_connection_preface=False,
http2_framedump=False,
# Websockets
ws_read_limit=None,
# Network
timeout=None,
# Output control
showreq=False,
showresp=False,
explain=False,
hexdump=False,
ignorecodes=(),
ignoretimeout=False,
showsummary=False,
fp=sys.stdout
):
"""
spec: A request specification
showreq: Print requests
showresp: Print responses
explain: Print request explanation
showssl: Print info on SSL connection
hexdump: When printing requests or responses, use hex dump output
showsummary: Show a summary of requests
ignorecodes: Sequence of return codes to ignore
"""
tcp.TCPClient.__init__(self, address)
self.ssl, self.sni = ssl, sni
self.clientcert = clientcert
self.ssl_version = ssl_version
self.ssl_options = ssl_options
self.ciphers = ciphers
self.sslinfo = None
self.use_http2 = use_http2
self.http2_skip_connection_preface = http2_skip_connection_preface
self.http2_framedump = http2_framedump
self.ws_read_limit = ws_read_limit
self.timeout = timeout
self.showreq = showreq
self.showresp = showresp
self.explain = explain
self.hexdump = hexdump
self.ignorecodes = ignorecodes
self.ignoretimeout = ignoretimeout
self.showsummary = showsummary
self.fp = fp
self.ws_framereader = None
if self.use_http2:
if not tcp.HAS_ALPN: # pragma: no cover
log.write_raw(
self.fp,
"HTTP/2 requires ALPN support. "
"Please use OpenSSL >= 1.0.2. "
"Pathoc might not be working as expected without ALPN.",
timestamp=False
)
self.protocol = http2.HTTP2Protocol(self, dump_frames=self.http2_framedump)
else:
self.protocol = http1
self.settings = language.Settings(
is_client=True,
staticdir=os.getcwd(),
unconstrained_file_access=True,
request_host=self.address.host,
protocol=self.protocol,
)
def http_connect(self, connect_to):
self.wfile.write(
'CONNECT %s:%s HTTP/1.1\r\n' % tuple(connect_to) +
'\r\n'
)
self.wfile.flush()
try:
resp = self.protocol.read_response(self.rfile, treq(method=b"CONNECT"))
if resp.status_code != 200:
raise exceptions.HttpException("Unexpected status code: %s" % resp.status_code)
except exceptions.HttpException as e:
six.reraise(PathocError, PathocError(
"Proxy CONNECT failed: %s" % repr(e)
))
def socks_connect(self, connect_to):
try:
client_greet = socks.ClientGreeting(
socks.VERSION.SOCKS5,
[socks.METHOD.NO_AUTHENTICATION_REQUIRED]
)
client_greet.to_file(self.wfile)
self.wfile.flush()
server_greet = socks.ServerGreeting.from_file(self.rfile)
server_greet.assert_socks5()
if server_greet.method != socks.METHOD.NO_AUTHENTICATION_REQUIRED:
raise socks.SocksError(
socks.METHOD.NO_ACCEPTABLE_METHODS,
"pathoc only supports SOCKS without authentication"
)
connect_request = socks.Message(
socks.VERSION.SOCKS5,
socks.CMD.CONNECT,
socks.ATYP.DOMAINNAME,
tcp.Address.wrap(connect_to)
)
connect_request.to_file(self.wfile)
self.wfile.flush()
connect_reply = socks.Message.from_file(self.rfile)
connect_reply.assert_socks5()
if connect_reply.msg != socks.REP.SUCCEEDED:
raise socks.SocksError(
connect_reply.msg,
"SOCKS server error"
)
except (socks.SocksError, exceptions.TcpDisconnect) as e:
raise PathocError(str(e))
def connect(self, connect_to=None, showssl=False, fp=sys.stdout):
"""
connect_to: A (host, port) tuple, which will be connected to with
an HTTP CONNECT request.
"""
if self.use_http2 and not self.ssl:
raise NotImplementedError("HTTP2 without SSL is not supported.")
with tcp.TCPClient.connect(self) as closer:
if connect_to:
self.http_connect(connect_to)
self.sslinfo = None
if self.ssl:
try:
alpn_protos = [b'http/1.1']
if self.use_http2:
alpn_protos.append(b'h2')
self.convert_to_ssl(
sni=self.sni,
cert=self.clientcert,
method=self.ssl_version,
options=self.ssl_options,
cipher_list=self.ciphers,
alpn_protos=alpn_protos
)
except exceptions.TlsException as v:
raise PathocError(str(v))
self.sslinfo = SSLInfo(
self.connection.get_peer_cert_chain(),
self.get_current_cipher(),
self.get_alpn_proto_negotiated()
)
if showssl:
print(str(self.sslinfo), file=fp)
if self.use_http2:
self.protocol.check_alpn()
if not self.http2_skip_connection_preface:
self.protocol.perform_client_connection_preface()
if self.timeout:
self.settimeout(self.timeout)
return closer.pop()
def stop(self):
if self.ws_framereader:
self.ws_framereader.terminate.put(None)
def wait(self, timeout=0.01, finish=True):
"""
A generator that yields frames until Pathoc terminates.
timeout: If specified, None may be yielded instead if the timeout is
reached. If timeout is None, wait forever. If timeout is 0, return
immediately if nothing is on the queue.
finish: If true, consume messages until the reader shuts down.
Otherwise, return None on timeout.
"""
if self.ws_framereader:
while True:
try:
frm = self.ws_framereader.frames_queue.get(
timeout=timeout,
block=True if timeout != 0 else False
)
except queue.Empty:
if finish:
continue
else:
return
if frm is None:
self.ws_framereader.join()
self.ws_framereader = None
return
yield frm
def websocket_send_frame(self, r):
"""
Sends a single websocket frame.
"""
logger = log.ConnectionLogger(
self.fp,
self.hexdump,
False,
None,
self.wfile if self.showreq else None,
)
with logger.ctx() as lg:
lg(">> %s" % r)
language.serve(r, self.wfile, self.settings)
self.wfile.flush()
def websocket_start(self, r):
"""
Performs an HTTP request, and attempts to drop into websocket
connection.
"""
resp = self.http(r)
if resp.status_code == 101:
self.ws_framereader = WebsocketFrameReader(
self.rfile,
self.fp,
self.showresp,
self.hexdump,
self.ws_read_limit,
self.timeout
)
self.ws_framereader.start()
return resp
def http(self, r):
"""
Performs a single request.
r: A language.http.Request object, or a string representing one
request.
Returns Response if we have a non-ignored response.
May raise a exceptions.NetlibException
"""
logger = log.ConnectionLogger(
self.fp,
self.hexdump,
False,
self.rfile if self.showresp else None,
self.wfile if self.showreq else None,
)
with logger.ctx() as lg:
lg(">> %s" % r)
resp, req = None, None
try:
req = language.serve(r, self.wfile, self.settings)
self.wfile.flush()
resp = self.protocol.read_response(self.rfile, treq(method=req["method"].encode()))
resp.sslinfo = self.sslinfo
except exceptions.HttpException as v:
lg("Invalid server response: %s" % v)
raise
except exceptions.TcpTimeout:
if self.ignoretimeout:
lg("Timeout (ignored)")
return None
lg("Timeout")
raise
finally:
if resp:
lg("<< %s %s: %s bytes" % (
resp.status_code, strutils.bytes_to_escaped_str(resp.reason.encode()), len(resp.content)
))
if resp.status_code in self.ignorecodes:
lg.suppress()
return resp
def request(self, r):
"""
Performs a single request.
r: A language.message.Message object, or a string representing
one.
Returns Response if we have a non-ignored response.
May raise a exceptions.NetlibException
"""
if isinstance(r, six.string_types):
r = next(language.parse_pathoc(r, self.use_http2))
if isinstance(r, language.http.Request):
if r.ws:
return self.websocket_start(r)
else:
return self.http(r)
elif isinstance(r, language.websockets.WebsocketFrame):
self.websocket_send_frame(r)
elif isinstance(r, language.http2.Request):
return self.http(r)
# elif isinstance(r, language.http2.Frame):
# TODO: do something
def main(args): # pragma: no cover
memo = set([])
trycount = 0
p = None
try:
cnt = 0
while True:
if cnt == args.repeat and args.repeat != 0:
break
if args.wait and cnt != 0:
time.sleep(args.wait)
cnt += 1
playlist = itertools.chain(*args.requests)
if args.random:
playlist = random.choice(args.requests)
p = Pathoc(
(args.host, args.port),
ssl=args.ssl,
sni=args.sni,
ssl_version=args.ssl_version,
ssl_options=args.ssl_options,
clientcert=args.clientcert,
ciphers=args.ciphers,
use_http2=args.use_http2,
http2_skip_connection_preface=args.http2_skip_connection_preface,
http2_framedump=args.http2_framedump,
showreq=args.showreq,
showresp=args.showresp,
explain=args.explain,
hexdump=args.hexdump,
ignorecodes=args.ignorecodes,
timeout=args.timeout,
ignoretimeout=args.ignoretimeout,
showsummary=True
)
trycount = 0
try:
with p.connect(args.connect_to, args.showssl):
for spec in playlist:
if args.explain or args.memo:
spec = spec.freeze(p.settings)
if args.memo:
h = hashlib.sha256(spec.spec()).digest()
if h not in memo:
trycount = 0
memo.add(h)
else:
trycount += 1
if trycount > args.memolimit:
print("Memo limit exceeded...", file=sys.stderr)
return
else:
continue
try:
ret = p.request(spec)
if ret and args.oneshot:
return
# We consume the queue when we can, so it doesn't build up.
for i_ in p.wait(timeout=0, finish=False):
pass
except exceptions.NetlibException:
break
for i_ in p.wait(timeout=0.01, finish=True):
pass
except exceptions.TcpException as v:
print(str(v), file=sys.stderr)
continue
except PathocError as v:
print(str(v), file=sys.stderr)
sys.exit(1)
except KeyboardInterrupt:
pass
if p:
p.stop()
| 1 | 11,700 | I think we should apply IDNA encoding for the host here. | mitmproxy-mitmproxy | py |
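The review comment above is about the Python http_connect code; as a language-neutral illustration (in Go, using golang.org/x/net/idna), "apply IDNA encoding for the host" means converting a Unicode hostname to its ASCII punycode form before writing it into the CONNECT request line. The example hostname below is made up.

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	host := "bücher.example" // non-ASCII hostname, for illustration
	asciiHost, err := idna.ToASCII(host)
	if err != nil {
		panic(err)
	}
	// The ASCII form is what belongs on the wire in a CONNECT request line.
	fmt.Printf("CONNECT %s:%d HTTP/1.1\r\n\r\n", asciiHost, 443)
	// asciiHost is "xn--bcher-kva.example"
}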
@@ -10127,8 +10127,10 @@ void Client::Handle_OP_PetCommands(const EQApplicationPacket *app)
mypet->SayString(this, Chat::PetResponse, PET_GETLOST_STRING);
mypet->CastToNPC()->Depop();
+ this->Save(1);
//Oddly, the client (Titanium) will still allow "/pet get lost" command despite me adding the code below. If someone can figure that out, you can uncomment this code and use it.
+ // On live, since AA merging into the Companion's Discipline, /pet get lost is available without any AA requirement.
/*
if((mypet->GetPetType() == petAnimation && GetAA(aaAnimationEmpathy) >= 2) || mypet->GetPetType() != petAnimation) {
mypet->SayString(PET_GETLOST_STRING); | 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2016 EQEMu Development Team (http://eqemulator.net)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "../common/global_define.h"
#include "../common/eqemu_logsys.h"
#include "../common/opcodemgr.h"
#include <iomanip>
#include <iostream>
#include <math.h>
#include <set>
#include <stdio.h>
#include <string.h>
#include <zlib.h>
#ifdef _WINDOWS
#define snprintf _snprintf
#define strncasecmp _strnicmp
#define strcasecmp _stricmp
#else
#include <pthread.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>
#endif
#include "../common/crc32.h"
#include "../common/data_verification.h"
#include "../common/faction.h"
#include "../common/guilds.h"
#include "../common/rdtsc.h"
#include "../common/rulesys.h"
#include "../common/skills.h"
#include "../common/spdat.h"
#include "../common/string_util.h"
#include "../common/zone_numbers.h"
#include "data_bucket.h"
#include "event_codes.h"
#include "guild_mgr.h"
#include "merc.h"
#include "petitions.h"
#include "pets.h"
#include "queryserv.h"
#include "quest_parser_collection.h"
#include "string_ids.h"
#include "titles.h"
#include "water_map.h"
#include "worldserver.h"
#include "zone.h"
#include "mob_movement_manager.h"
#ifdef BOTS
#include "bot.h"
#endif
extern QueryServ* QServ;
extern Zone* zone;
extern volatile bool is_zone_loaded;
extern WorldServer worldserver;
extern PetitionList petition_list;
extern EntityList entity_list;
typedef void (Client::*ClientPacketProc)(const EQApplicationPacket *app);
//Use a map for connecting opcodes since it doesn't get used a lot and is sparse
std::map<uint32, ClientPacketProc> ConnectingOpcodes;
//Use a static array for connected, for speed
ClientPacketProc ConnectedOpcodes[_maxEmuOpcode];
void MapOpcodes()
{
ConnectingOpcodes.clear();
memset(ConnectedOpcodes, 0, sizeof(ConnectedOpcodes));
// Now put all the opcodes into their home...
// connecting opcode handler assignments:
ConnectingOpcodes[OP_ApproveZone] = &Client::Handle_Connect_OP_ApproveZone;
ConnectingOpcodes[OP_BlockedBuffs] = &Client::Handle_OP_BlockedBuffs;
ConnectingOpcodes[OP_ClientError] = &Client::Handle_Connect_OP_ClientError;
ConnectingOpcodes[OP_ClientReady] = &Client::Handle_Connect_OP_ClientReady;
ConnectingOpcodes[OP_ClientUpdate] = &Client::Handle_Connect_OP_ClientUpdate;
ConnectingOpcodes[OP_GetGuildsList] = &Client::Handle_OP_GetGuildsList; // temporary hack
ConnectingOpcodes[OP_ReqClientSpawn] = &Client::Handle_Connect_OP_ReqClientSpawn;
ConnectingOpcodes[OP_ReqNewZone] = &Client::Handle_Connect_OP_ReqNewZone;
ConnectingOpcodes[OP_SendAAStats] = &Client::Handle_Connect_OP_SendAAStats;
ConnectingOpcodes[OP_SendAATable] = &Client::Handle_Connect_OP_SendAATable;
ConnectingOpcodes[OP_SendExpZonein] = &Client::Handle_Connect_OP_SendExpZonein;
ConnectingOpcodes[OP_SendGuildTributes] = &Client::Handle_Connect_OP_SendGuildTributes;
ConnectingOpcodes[OP_SendGuildTributes] = &Client::Handle_Connect_OP_SendGuildTributes; // I guess it didn't believe us with the first assignment?
ConnectingOpcodes[OP_SendTributes] = &Client::Handle_Connect_OP_SendTributes;
ConnectingOpcodes[OP_SetServerFilter] = &Client::Handle_Connect_OP_SetServerFilter;
ConnectingOpcodes[OP_SpawnAppearance] = &Client::Handle_Connect_OP_SpawnAppearance;
ConnectingOpcodes[OP_TGB] = &Client::Handle_Connect_OP_TGB;
ConnectingOpcodes[OP_UpdateAA] = &Client::Handle_Connect_OP_UpdateAA;
ConnectingOpcodes[OP_WearChange] = &Client::Handle_Connect_OP_WearChange;
ConnectingOpcodes[OP_WorldObjectsSent] = &Client::Handle_Connect_OP_WorldObjectsSent;
ConnectingOpcodes[OP_XTargetAutoAddHaters] = &Client::Handle_OP_XTargetAutoAddHaters;
ConnectingOpcodes[OP_XTargetRequest] = &Client::Handle_OP_XTargetRequest;
ConnectingOpcodes[OP_ZoneComplete] = &Client::Handle_Connect_OP_ZoneComplete;
ConnectingOpcodes[OP_ZoneEntry] = &Client::Handle_Connect_OP_ZoneEntry;
// connected opcode handler assignments:
ConnectedOpcodes[OP_0x0193] = &Client::Handle_0x0193;
ConnectedOpcodes[OP_AAAction] = &Client::Handle_OP_AAAction;
ConnectedOpcodes[OP_AcceptNewTask] = &Client::Handle_OP_AcceptNewTask;
ConnectedOpcodes[OP_AdventureInfoRequest] = &Client::Handle_OP_AdventureInfoRequest;
ConnectedOpcodes[OP_AdventureLeaderboardRequest] = &Client::Handle_OP_AdventureLeaderboardRequest;
ConnectedOpcodes[OP_AdventureMerchantPurchase] = &Client::Handle_OP_AdventureMerchantPurchase;
ConnectedOpcodes[OP_AdventureMerchantRequest] = &Client::Handle_OP_AdventureMerchantRequest;
ConnectedOpcodes[OP_AdventureMerchantSell] = &Client::Handle_OP_AdventureMerchantSell;
ConnectedOpcodes[OP_AdventureRequest] = &Client::Handle_OP_AdventureRequest;
ConnectedOpcodes[OP_AdventureStatsRequest] = &Client::Handle_OP_AdventureStatsRequest;
ConnectedOpcodes[OP_AggroMeterLockTarget] = &Client::Handle_OP_AggroMeterLockTarget;
ConnectedOpcodes[OP_AltCurrencyMerchantRequest] = &Client::Handle_OP_AltCurrencyMerchantRequest;
ConnectedOpcodes[OP_AltCurrencyPurchase] = &Client::Handle_OP_AltCurrencyPurchase;
ConnectedOpcodes[OP_AltCurrencyReclaim] = &Client::Handle_OP_AltCurrencyReclaim;
ConnectedOpcodes[OP_AltCurrencySell] = &Client::Handle_OP_AltCurrencySell;
ConnectedOpcodes[OP_AltCurrencySellSelection] = &Client::Handle_OP_AltCurrencySellSelection;
ConnectedOpcodes[OP_Animation] = &Client::Handle_OP_Animation;
ConnectedOpcodes[OP_ApplyPoison] = &Client::Handle_OP_ApplyPoison;
ConnectedOpcodes[OP_Assist] = &Client::Handle_OP_Assist;
ConnectedOpcodes[OP_AssistGroup] = &Client::Handle_OP_AssistGroup;
ConnectedOpcodes[OP_AugmentInfo] = &Client::Handle_OP_AugmentInfo;
ConnectedOpcodes[OP_AugmentItem] = &Client::Handle_OP_AugmentItem;
ConnectedOpcodes[OP_AutoAttack] = &Client::Handle_OP_AutoAttack;
ConnectedOpcodes[OP_AutoAttack2] = &Client::Handle_OP_AutoAttack2;
ConnectedOpcodes[OP_AutoFire] = &Client::Handle_OP_AutoFire;
ConnectedOpcodes[OP_Bandolier] = &Client::Handle_OP_Bandolier;
ConnectedOpcodes[OP_BankerChange] = &Client::Handle_OP_BankerChange;
ConnectedOpcodes[OP_Barter] = &Client::Handle_OP_Barter;
ConnectedOpcodes[OP_BazaarInspect] = &Client::Handle_OP_BazaarInspect;
ConnectedOpcodes[OP_BazaarSearch] = &Client::Handle_OP_BazaarSearch;
ConnectedOpcodes[OP_Begging] = &Client::Handle_OP_Begging;
ConnectedOpcodes[OP_Bind_Wound] = &Client::Handle_OP_Bind_Wound;
ConnectedOpcodes[OP_BlockedBuffs] = &Client::Handle_OP_BlockedBuffs;
ConnectedOpcodes[OP_BoardBoat] = &Client::Handle_OP_BoardBoat;
ConnectedOpcodes[OP_Buff] = &Client::Handle_OP_Buff;
ConnectedOpcodes[OP_BuffRemoveRequest] = &Client::Handle_OP_BuffRemoveRequest;
ConnectedOpcodes[OP_Bug] = &Client::Handle_OP_Bug;
ConnectedOpcodes[OP_Camp] = &Client::Handle_OP_Camp;
ConnectedOpcodes[OP_CancelTask] = &Client::Handle_OP_CancelTask;
ConnectedOpcodes[OP_CancelTrade] = &Client::Handle_OP_CancelTrade;
ConnectedOpcodes[OP_CastSpell] = &Client::Handle_OP_CastSpell;
ConnectedOpcodes[OP_ChannelMessage] = &Client::Handle_OP_ChannelMessage;
ConnectedOpcodes[OP_ClearBlockedBuffs] = &Client::Handle_OP_ClearBlockedBuffs;
ConnectedOpcodes[OP_ClearNPCMarks] = &Client::Handle_OP_ClearNPCMarks;
ConnectedOpcodes[OP_ClearSurname] = &Client::Handle_OP_ClearSurname;
ConnectedOpcodes[OP_ClickDoor] = &Client::Handle_OP_ClickDoor;
ConnectedOpcodes[OP_ClickObject] = &Client::Handle_OP_ClickObject;
ConnectedOpcodes[OP_ClickObjectAction] = &Client::Handle_OP_ClickObjectAction;
ConnectedOpcodes[OP_ClientError] = &Client::Handle_OP_ClientError;
ConnectedOpcodes[OP_ClientTimeStamp] = &Client::Handle_OP_ClientTimeStamp;
ConnectedOpcodes[OP_ClientUpdate] = &Client::Handle_OP_ClientUpdate;
ConnectedOpcodes[OP_CombatAbility] = &Client::Handle_OP_CombatAbility;
ConnectedOpcodes[OP_ConfirmDelete] = &Client::Handle_OP_ConfirmDelete;
ConnectedOpcodes[OP_Consent] = &Client::Handle_OP_Consent;
ConnectedOpcodes[OP_ConsentDeny] = &Client::Handle_OP_ConsentDeny;
ConnectedOpcodes[OP_Consider] = &Client::Handle_OP_Consider;
ConnectedOpcodes[OP_ConsiderCorpse] = &Client::Handle_OP_ConsiderCorpse;
ConnectedOpcodes[OP_Consume] = &Client::Handle_OP_Consume;
ConnectedOpcodes[OP_ControlBoat] = &Client::Handle_OP_ControlBoat;
ConnectedOpcodes[OP_CorpseDrag] = &Client::Handle_OP_CorpseDrag;
ConnectedOpcodes[OP_CorpseDrop] = &Client::Handle_OP_CorpseDrop;
ConnectedOpcodes[OP_CrashDump] = &Client::Handle_OP_CrashDump;
ConnectedOpcodes[OP_CrystalCreate] = &Client::Handle_OP_CrystalCreate;
ConnectedOpcodes[OP_CrystalReclaim] = &Client::Handle_OP_CrystalReclaim;
ConnectedOpcodes[OP_Damage] = &Client::Handle_OP_Damage;
ConnectedOpcodes[OP_Death] = &Client::Handle_OP_Death;
ConnectedOpcodes[OP_DelegateAbility] = &Client::Handle_OP_DelegateAbility;
ConnectedOpcodes[OP_DeleteItem] = &Client::Handle_OP_DeleteItem;
ConnectedOpcodes[OP_DeleteSpawn] = &Client::Handle_OP_DeleteSpawn;
ConnectedOpcodes[OP_DeleteSpell] = &Client::Handle_OP_DeleteSpell;
ConnectedOpcodes[OP_Disarm] = &Client::Handle_OP_Disarm;
ConnectedOpcodes[OP_DisarmTraps] = &Client::Handle_OP_DisarmTraps;
ConnectedOpcodes[OP_DoGroupLeadershipAbility] = &Client::Handle_OP_DoGroupLeadershipAbility;
ConnectedOpcodes[OP_DuelResponse] = &Client::Handle_OP_DuelResponse;
ConnectedOpcodes[OP_DuelResponse2] = &Client::Handle_OP_DuelResponse2;
ConnectedOpcodes[OP_DumpName] = &Client::Handle_OP_DumpName;
ConnectedOpcodes[OP_Dye] = &Client::Handle_OP_Dye;
ConnectedOpcodes[OP_Emote] = &Client::Handle_OP_Emote;
ConnectedOpcodes[OP_EndLootRequest] = &Client::Handle_OP_EndLootRequest;
ConnectedOpcodes[OP_EnvDamage] = &Client::Handle_OP_EnvDamage;
ConnectedOpcodes[OP_FaceChange] = &Client::Handle_OP_FaceChange;
ConnectedOpcodes[OP_FeignDeath] = &Client::Handle_OP_FeignDeath;
ConnectedOpcodes[OP_FindPersonRequest] = &Client::Handle_OP_FindPersonRequest;
ConnectedOpcodes[OP_Fishing] = &Client::Handle_OP_Fishing;
ConnectedOpcodes[OP_FloatListThing] = &Client::Handle_OP_Ignore;
ConnectedOpcodes[OP_Forage] = &Client::Handle_OP_Forage;
ConnectedOpcodes[OP_FriendsWho] = &Client::Handle_OP_FriendsWho;
ConnectedOpcodes[OP_GetGuildMOTD] = &Client::Handle_OP_GetGuildMOTD;
ConnectedOpcodes[OP_GetGuildsList] = &Client::Handle_OP_GetGuildsList;
ConnectedOpcodes[OP_GMBecomeNPC] = &Client::Handle_OP_GMBecomeNPC;
ConnectedOpcodes[OP_GMDelCorpse] = &Client::Handle_OP_GMDelCorpse;
ConnectedOpcodes[OP_GMEmoteZone] = &Client::Handle_OP_GMEmoteZone;
ConnectedOpcodes[OP_GMEndTraining] = &Client::Handle_OP_GMEndTraining;
ConnectedOpcodes[OP_GMFind] = &Client::Handle_OP_GMFind;
ConnectedOpcodes[OP_GMGoto] = &Client::Handle_OP_GMGoto;
ConnectedOpcodes[OP_GMHideMe] = &Client::Handle_OP_GMHideMe;
ConnectedOpcodes[OP_GMKick] = &Client::Handle_OP_GMKick;
ConnectedOpcodes[OP_GMKill] = &Client::Handle_OP_GMKill;
ConnectedOpcodes[OP_GMLastName] = &Client::Handle_OP_GMLastName;
ConnectedOpcodes[OP_GMNameChange] = &Client::Handle_OP_GMNameChange;
ConnectedOpcodes[OP_GMSearchCorpse] = &Client::Handle_OP_GMSearchCorpse;
ConnectedOpcodes[OP_GMServers] = &Client::Handle_OP_GMServers;
ConnectedOpcodes[OP_GMSummon] = &Client::Handle_OP_GMSummon;
ConnectedOpcodes[OP_GMToggle] = &Client::Handle_OP_GMToggle;
ConnectedOpcodes[OP_GMTraining] = &Client::Handle_OP_GMTraining;
ConnectedOpcodes[OP_GMTrainSkill] = &Client::Handle_OP_GMTrainSkill;
ConnectedOpcodes[OP_GMZoneRequest] = &Client::Handle_OP_GMZoneRequest;
ConnectedOpcodes[OP_GMZoneRequest2] = &Client::Handle_OP_GMZoneRequest2;
ConnectedOpcodes[OP_GroundSpawn] = &Client::Handle_OP_CreateObject;
ConnectedOpcodes[OP_GroupAcknowledge] = &Client::Handle_OP_GroupAcknowledge;
ConnectedOpcodes[OP_GroupCancelInvite] = &Client::Handle_OP_GroupCancelInvite;
ConnectedOpcodes[OP_GroupDelete] = &Client::Handle_OP_GroupDelete;
ConnectedOpcodes[OP_GroupDisband] = &Client::Handle_OP_GroupDisband;
ConnectedOpcodes[OP_GroupFollow] = &Client::Handle_OP_GroupFollow;
ConnectedOpcodes[OP_GroupFollow2] = &Client::Handle_OP_GroupFollow2;
ConnectedOpcodes[OP_GroupInvite] = &Client::Handle_OP_GroupInvite;
ConnectedOpcodes[OP_GroupInvite2] = &Client::Handle_OP_GroupInvite2;
ConnectedOpcodes[OP_GroupMakeLeader] = &Client::Handle_OP_GroupMakeLeader;
ConnectedOpcodes[OP_GroupMentor] = &Client::Handle_OP_GroupMentor;
ConnectedOpcodes[OP_GroupRoles] = &Client::Handle_OP_GroupRoles;
ConnectedOpcodes[OP_GroupUpdate] = &Client::Handle_OP_GroupUpdate;
ConnectedOpcodes[OP_GuildBank] = &Client::Handle_OP_GuildBank;
ConnectedOpcodes[OP_GuildCreate] = &Client::Handle_OP_GuildCreate;
ConnectedOpcodes[OP_GuildDelete] = &Client::Handle_OP_GuildDelete;
ConnectedOpcodes[OP_GuildDemote] = &Client::Handle_OP_GuildDemote;
ConnectedOpcodes[OP_GuildInvite] = &Client::Handle_OP_GuildInvite;
ConnectedOpcodes[OP_GuildInviteAccept] = &Client::Handle_OP_GuildInviteAccept;
ConnectedOpcodes[OP_GuildLeader] = &Client::Handle_OP_GuildLeader;
ConnectedOpcodes[OP_GuildManageBanker] = &Client::Handle_OP_GuildManageBanker;
ConnectedOpcodes[OP_GuildPeace] = &Client::Handle_OP_GuildPeace;
ConnectedOpcodes[OP_GuildPromote] = &Client::Handle_OP_GuildPromote;
ConnectedOpcodes[OP_GuildPublicNote] = &Client::Handle_OP_GuildPublicNote;
ConnectedOpcodes[OP_GuildRemove] = &Client::Handle_OP_GuildRemove;
ConnectedOpcodes[OP_GuildStatus] = &Client::Handle_OP_GuildStatus;
ConnectedOpcodes[OP_GuildUpdateURLAndChannel] = &Client::Handle_OP_GuildUpdateURLAndChannel;
ConnectedOpcodes[OP_GuildWar] = &Client::Handle_OP_GuildWar;
ConnectedOpcodes[OP_Heartbeat] = &Client::Handle_OP_Heartbeat;
ConnectedOpcodes[OP_Hide] = &Client::Handle_OP_Hide;
ConnectedOpcodes[OP_HideCorpse] = &Client::Handle_OP_HideCorpse;
ConnectedOpcodes[OP_Illusion] = &Client::Handle_OP_Illusion;
ConnectedOpcodes[OP_InspectAnswer] = &Client::Handle_OP_InspectAnswer;
ConnectedOpcodes[OP_InspectMessageUpdate] = &Client::Handle_OP_InspectMessageUpdate;
ConnectedOpcodes[OP_InspectRequest] = &Client::Handle_OP_InspectRequest;
ConnectedOpcodes[OP_InstillDoubt] = &Client::Handle_OP_InstillDoubt;
ConnectedOpcodes[OP_ItemLinkClick] = &Client::Handle_OP_ItemLinkClick;
ConnectedOpcodes[OP_ItemLinkResponse] = &Client::Handle_OP_ItemLinkResponse;
ConnectedOpcodes[OP_ItemName] = &Client::Handle_OP_ItemName;
ConnectedOpcodes[OP_ItemPreview] = &Client::Handle_OP_ItemPreview;
ConnectedOpcodes[OP_ItemVerifyRequest] = &Client::Handle_OP_ItemVerifyRequest;
ConnectedOpcodes[OP_ItemViewUnknown] = &Client::Handle_OP_Ignore;
ConnectedOpcodes[OP_Jump] = &Client::Handle_OP_Jump;
ConnectedOpcodes[OP_KeyRing] = &Client::Handle_OP_KeyRing;
ConnectedOpcodes[OP_LDoNButton] = &Client::Handle_OP_LDoNButton;
ConnectedOpcodes[OP_LDoNDisarmTraps] = &Client::Handle_OP_LDoNDisarmTraps;
ConnectedOpcodes[OP_LDoNInspect] = &Client::Handle_OP_LDoNInspect;
ConnectedOpcodes[OP_LDoNOpen] = &Client::Handle_OP_LDoNOpen;
ConnectedOpcodes[OP_LDoNPickLock] = &Client::Handle_OP_LDoNPickLock;
ConnectedOpcodes[OP_LDoNSenseTraps] = &Client::Handle_OP_LDoNSenseTraps;
ConnectedOpcodes[OP_LeadershipExpToggle] = &Client::Handle_OP_LeadershipExpToggle;
ConnectedOpcodes[OP_LeaveAdventure] = &Client::Handle_OP_LeaveAdventure;
ConnectedOpcodes[OP_LeaveBoat] = &Client::Handle_OP_LeaveBoat;
ConnectedOpcodes[OP_LFGCommand] = &Client::Handle_OP_LFGCommand;
ConnectedOpcodes[OP_LFGGetMatchesRequest] = &Client::Handle_OP_LFGGetMatchesRequest;
ConnectedOpcodes[OP_LFGuild] = &Client::Handle_OP_LFGuild;
ConnectedOpcodes[OP_LFPCommand] = &Client::Handle_OP_LFPCommand;
ConnectedOpcodes[OP_LFPGetMatchesRequest] = &Client::Handle_OP_LFPGetMatchesRequest;
ConnectedOpcodes[OP_LoadSpellSet] = &Client::Handle_OP_LoadSpellSet;
ConnectedOpcodes[OP_Logout] = &Client::Handle_OP_Logout;
ConnectedOpcodes[OP_LootItem] = &Client::Handle_OP_LootItem;
ConnectedOpcodes[OP_LootRequest] = &Client::Handle_OP_LootRequest;
ConnectedOpcodes[OP_ManaChange] = &Client::Handle_OP_ManaChange;
ConnectedOpcodes[OP_MemorizeSpell] = &Client::Handle_OP_MemorizeSpell;
ConnectedOpcodes[OP_Mend] = &Client::Handle_OP_Mend;
ConnectedOpcodes[OP_MercenaryCommand] = &Client::Handle_OP_MercenaryCommand;
ConnectedOpcodes[OP_MercenaryDataRequest] = &Client::Handle_OP_MercenaryDataRequest;
ConnectedOpcodes[OP_MercenaryDataUpdateRequest] = &Client::Handle_OP_MercenaryDataUpdateRequest;
ConnectedOpcodes[OP_MercenaryDismiss] = &Client::Handle_OP_MercenaryDismiss;
ConnectedOpcodes[OP_MercenaryHire] = &Client::Handle_OP_MercenaryHire;
ConnectedOpcodes[OP_MercenarySuspendRequest] = &Client::Handle_OP_MercenarySuspendRequest;
ConnectedOpcodes[OP_MercenaryTimerRequest] = &Client::Handle_OP_MercenaryTimerRequest;
ConnectedOpcodes[OP_MoveCoin] = &Client::Handle_OP_MoveCoin;
ConnectedOpcodes[OP_MoveItem] = &Client::Handle_OP_MoveItem;
ConnectedOpcodes[OP_MoveMultipleItems] = &Client::Handle_OP_MoveMultipleItems;
ConnectedOpcodes[OP_OpenContainer] = &Client::Handle_OP_OpenContainer;
ConnectedOpcodes[OP_OpenGuildTributeMaster] = &Client::Handle_OP_OpenGuildTributeMaster;
ConnectedOpcodes[OP_OpenInventory] = &Client::Handle_OP_OpenInventory;
ConnectedOpcodes[OP_OpenTributeMaster] = &Client::Handle_OP_OpenTributeMaster;
ConnectedOpcodes[OP_PDeletePetition] = &Client::Handle_OP_PDeletePetition;
ConnectedOpcodes[OP_PetCommands] = &Client::Handle_OP_PetCommands;
ConnectedOpcodes[OP_Petition] = &Client::Handle_OP_Petition;
ConnectedOpcodes[OP_PetitionBug] = &Client::Handle_OP_PetitionBug;
ConnectedOpcodes[OP_PetitionCheckIn] = &Client::Handle_OP_PetitionCheckIn;
ConnectedOpcodes[OP_PetitionCheckout] = &Client::Handle_OP_PetitionCheckout;
ConnectedOpcodes[OP_PetitionDelete] = &Client::Handle_OP_PetitionDelete;
ConnectedOpcodes[OP_PetitionQue] = &Client::Handle_OP_PetitionQue;
ConnectedOpcodes[OP_PetitionRefresh] = &Client::Handle_OP_PetitionRefresh;
ConnectedOpcodes[OP_PetitionResolve] = &Client::Handle_OP_PetitionResolve;
ConnectedOpcodes[OP_PetitionUnCheckout] = &Client::Handle_OP_PetitionUnCheckout;
ConnectedOpcodes[OP_PlayerStateAdd] = &Client::Handle_OP_PlayerStateAdd;
ConnectedOpcodes[OP_PlayerStateRemove] = &Client::Handle_OP_PlayerStateRemove;
ConnectedOpcodes[OP_PickPocket] = &Client::Handle_OP_PickPocket;
ConnectedOpcodes[OP_PopupResponse] = &Client::Handle_OP_PopupResponse;
ConnectedOpcodes[OP_PotionBelt] = &Client::Handle_OP_PotionBelt;
ConnectedOpcodes[OP_PurchaseLeadershipAA] = &Client::Handle_OP_PurchaseLeadershipAA;
ConnectedOpcodes[OP_PVPLeaderBoardDetailsRequest] = &Client::Handle_OP_PVPLeaderBoardDetailsRequest;
ConnectedOpcodes[OP_PVPLeaderBoardRequest] = &Client::Handle_OP_PVPLeaderBoardRequest;
ConnectedOpcodes[OP_QueryUCSServerStatus] = &Client::Handle_OP_QueryUCSServerStatus;
ConnectedOpcodes[OP_RaidInvite] = &Client::Handle_OP_RaidCommand;
ConnectedOpcodes[OP_RandomReq] = &Client::Handle_OP_RandomReq;
ConnectedOpcodes[OP_ReadBook] = &Client::Handle_OP_ReadBook;
ConnectedOpcodes[OP_RecipeAutoCombine] = &Client::Handle_OP_RecipeAutoCombine;
ConnectedOpcodes[OP_RecipeDetails] = &Client::Handle_OP_RecipeDetails;
ConnectedOpcodes[OP_RecipesFavorite] = &Client::Handle_OP_RecipesFavorite;
ConnectedOpcodes[OP_RecipesSearch] = &Client::Handle_OP_RecipesSearch;
ConnectedOpcodes[OP_ReloadUI] = &Client::Handle_OP_ReloadUI;
ConnectedOpcodes[OP_RemoveBlockedBuffs] = &Client::Handle_OP_RemoveBlockedBuffs;
ConnectedOpcodes[OP_RemoveTrap] = &Client::Handle_OP_RemoveTrap;
ConnectedOpcodes[OP_Report] = &Client::Handle_OP_Report;
ConnectedOpcodes[OP_RequestDuel] = &Client::Handle_OP_RequestDuel;
ConnectedOpcodes[OP_RequestTitles] = &Client::Handle_OP_RequestTitles;
ConnectedOpcodes[OP_RespawnWindow] = &Client::Handle_OP_RespawnWindow;
ConnectedOpcodes[OP_Rewind] = &Client::Handle_OP_Rewind;
ConnectedOpcodes[OP_RezzAnswer] = &Client::Handle_OP_RezzAnswer;
ConnectedOpcodes[OP_Sacrifice] = &Client::Handle_OP_Sacrifice;
ConnectedOpcodes[OP_SafeFallSuccess] = &Client::Handle_OP_SafeFallSuccess;
ConnectedOpcodes[OP_SafePoint] = &Client::Handle_OP_SafePoint;
ConnectedOpcodes[OP_Save] = &Client::Handle_OP_Save;
ConnectedOpcodes[OP_SaveOnZoneReq] = &Client::Handle_OP_SaveOnZoneReq;
ConnectedOpcodes[OP_SelectTribute] = &Client::Handle_OP_SelectTribute;
// Use or Ignore sense heading based on rule.
bool train = RuleB(Skills, TrainSenseHeading);
ConnectedOpcodes[OP_SenseHeading] =
(train) ? &Client::Handle_OP_SenseHeading : &Client::Handle_OP_Ignore;
ConnectedOpcodes[OP_SenseTraps] = &Client::Handle_OP_SenseTraps;
ConnectedOpcodes[OP_SetGuildMOTD] = &Client::Handle_OP_SetGuildMOTD;
ConnectedOpcodes[OP_SetRunMode] = &Client::Handle_OP_SetRunMode;
ConnectedOpcodes[OP_SetServerFilter] = &Client::Handle_OP_SetServerFilter;
ConnectedOpcodes[OP_SetStartCity] = &Client::Handle_OP_SetStartCity;
ConnectedOpcodes[OP_SetTitle] = &Client::Handle_OP_SetTitle;
ConnectedOpcodes[OP_Shielding] = &Client::Handle_OP_Shielding;
ConnectedOpcodes[OP_ShopEnd] = &Client::Handle_OP_ShopEnd;
ConnectedOpcodes[OP_ShopPlayerBuy] = &Client::Handle_OP_ShopPlayerBuy;
ConnectedOpcodes[OP_ShopPlayerSell] = &Client::Handle_OP_ShopPlayerSell;
ConnectedOpcodes[OP_ShopRequest] = &Client::Handle_OP_ShopRequest;
ConnectedOpcodes[OP_Sneak] = &Client::Handle_OP_Sneak;
ConnectedOpcodes[OP_SpawnAppearance] = &Client::Handle_OP_SpawnAppearance;
ConnectedOpcodes[OP_Split] = &Client::Handle_OP_Split;
ConnectedOpcodes[OP_Surname] = &Client::Handle_OP_Surname;
ConnectedOpcodes[OP_SwapSpell] = &Client::Handle_OP_SwapSpell;
ConnectedOpcodes[OP_TargetCommand] = &Client::Handle_OP_TargetCommand;
ConnectedOpcodes[OP_TargetMouse] = &Client::Handle_OP_TargetMouse;
ConnectedOpcodes[OP_TaskHistoryRequest] = &Client::Handle_OP_TaskHistoryRequest;
ConnectedOpcodes[OP_Taunt] = &Client::Handle_OP_Taunt;
ConnectedOpcodes[OP_TestBuff] = &Client::Handle_OP_TestBuff;
ConnectedOpcodes[OP_TGB] = &Client::Handle_OP_TGB;
ConnectedOpcodes[OP_Track] = &Client::Handle_OP_Track;
ConnectedOpcodes[OP_TrackTarget] = &Client::Handle_OP_TrackTarget;
ConnectedOpcodes[OP_TrackUnknown] = &Client::Handle_OP_TrackUnknown;
ConnectedOpcodes[OP_TradeAcceptClick] = &Client::Handle_OP_TradeAcceptClick;
ConnectedOpcodes[OP_TradeBusy] = &Client::Handle_OP_TradeBusy;
ConnectedOpcodes[OP_Trader] = &Client::Handle_OP_Trader;
ConnectedOpcodes[OP_TraderBuy] = &Client::Handle_OP_TraderBuy;
ConnectedOpcodes[OP_TradeRequest] = &Client::Handle_OP_TradeRequest;
ConnectedOpcodes[OP_TradeRequestAck] = &Client::Handle_OP_TradeRequestAck;
ConnectedOpcodes[OP_TraderShop] = &Client::Handle_OP_TraderShop;
ConnectedOpcodes[OP_TradeSkillCombine] = &Client::Handle_OP_TradeSkillCombine;
ConnectedOpcodes[OP_Translocate] = &Client::Handle_OP_Translocate;
ConnectedOpcodes[OP_TributeItem] = &Client::Handle_OP_TributeItem;
ConnectedOpcodes[OP_TributeMoney] = &Client::Handle_OP_TributeMoney;
ConnectedOpcodes[OP_TributeNPC] = &Client::Handle_OP_TributeNPC;
ConnectedOpcodes[OP_TributeToggle] = &Client::Handle_OP_TributeToggle;
ConnectedOpcodes[OP_TributeUpdate] = &Client::Handle_OP_TributeUpdate;
ConnectedOpcodes[OP_VetClaimRequest] = &Client::Handle_OP_VetClaimRequest;
ConnectedOpcodes[OP_VoiceMacroIn] = &Client::Handle_OP_VoiceMacroIn;
ConnectedOpcodes[OP_UpdateAura] = &Client::Handle_OP_UpdateAura;
ConnectedOpcodes[OP_WearChange] = &Client::Handle_OP_WearChange;
ConnectedOpcodes[OP_WhoAllRequest] = &Client::Handle_OP_WhoAllRequest;
ConnectedOpcodes[OP_WorldUnknown001] = &Client::Handle_OP_Ignore;
ConnectedOpcodes[OP_XTargetAutoAddHaters] = &Client::Handle_OP_XTargetAutoAddHaters;
ConnectedOpcodes[OP_XTargetOpen] = &Client::Handle_OP_XTargetOpen;
ConnectedOpcodes[OP_XTargetRequest] = &Client::Handle_OP_XTargetRequest;
ConnectedOpcodes[OP_YellForHelp] = &Client::Handle_OP_YellForHelp;
ConnectedOpcodes[OP_ZoneChange] = &Client::Handle_OP_ZoneChange;
ConnectedOpcodes[OP_ResetAA] = &Client::Handle_OP_ResetAA;
}
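// Remove any handler bound to the given opcode from both the connected-state
// table and the connecting-state map.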
void ClearMappedOpcode(EmuOpcode op)
{
if (op >= _maxEmuOpcode)
return;
ConnectedOpcodes[op] = nullptr;
auto iter = ConnectingOpcodes.find(op);
if (iter != ConnectingOpcodes.end()) {
ConnectingOpcodes.erase(iter);
}
}
// client methods
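// HandlePacket is the inbound dispatch point: it looks up the handler for the
// packet's opcode based on the current client_state (connecting vs. connected)
// and falls back to the EVENT_UNHANDLED_OPCODE script event when no handler is
// registered. If a connecting handler kicks the client, HandlePacket returns
// false so the caller can drop the connection.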
int Client::HandlePacket(const EQApplicationPacket *app)
{
if (LogSys.log_settings[Logs::LogCategory::Netcode].is_category_enabled == 1) {
char buffer[64];
app->build_header_dump(buffer);
Log(Logs::Detail, Logs::PacketClientServer, "Dispatch opcode: %s", buffer);
}
if (LogSys.log_settings[Logs::PacketClientServer].is_category_enabled == 1)
Log(Logs::General, Logs::PacketClientServer, "[%s - 0x%04x] [Size: %u]", OpcodeManager::EmuToName(app->GetOpcode()), app->GetOpcode(), app->Size());
if (LogSys.log_settings[Logs::PacketClientServerWithDump].is_category_enabled == 1)
Log(Logs::General, Logs::PacketClientServerWithDump, "[%s - 0x%04x] [Size: %u] %s", OpcodeManager::EmuToName(app->GetOpcode()), app->GetOpcode(), app->Size(), DumpPacketToString(app).c_str());
EmuOpcode opcode = app->GetOpcode();
if (opcode == OP_AckPacket) {
return true;
}
#if EQDEBUG >= 9
std::cout << "Received 0x" << std::hex << std::setw(4) << std::setfill('0') << opcode << ", size=" << std::dec << app->size << std::endl;
#endif
switch (client_state) {
case CLIENT_CONNECTING: {
if (ConnectingOpcodes.count(opcode) != 1) {
//The const_cast is ugly, but everything passed to Lua needs to be non-const even if it is treated as non-mutable
std::vector<EQEmu::Any> args;
args.push_back(const_cast<EQApplicationPacket*>(app));
parse->EventPlayer(EVENT_UNHANDLED_OPCODE, this, "", 1, &args);
break;
}
ClientPacketProc p;
p = ConnectingOpcodes[opcode];
//call the processing routine
(this->*p)(app);
//special case where connecting code needs to boot client...
if (client_state == CLIENT_KICKED) {
return(false);
}
break;
}
case CLIENT_CONNECTED: {
ClientPacketProc p;
p = ConnectedOpcodes[opcode];
if (p == nullptr) {
std::vector<EQEmu::Any> args;
args.push_back(const_cast<EQApplicationPacket*>(app));
parse->EventPlayer(EVENT_UNHANDLED_OPCODE, this, "", 0, &args);
if (LogSys.log_settings[Logs::PacketClientServerUnhandled].is_category_enabled == 1) {
char buffer[64];
app->build_header_dump(buffer);
Log(Logs::General, Logs::PacketClientServerUnhandled, "%s %s", buffer, DumpPacketToString(app).c_str());
}
break;
}
//call the processing routine
(this->*p)(app);
break;
}
case CLIENT_KICKED:
case DISCONNECTED:
case CLIENT_LINKDEAD:
break;
default:
LogDebug("Unknown client_state: [{}]\n", client_state);
break;
}
return(true);
}
// Finish client connecting state
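// CompleteConnect runs once zone-in is finished: it flips client_state to
// CLIENT_CONNECTED, restores raid/group membership, re-applies buff side
// effects, sends zone-wide appearance data, and fires EVENT_ENTER_ZONE (and
// EVENT_CONNECT on the first login since entering world).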
void Client::CompleteConnect()
{
UpdateWho();
client_state = CLIENT_CONNECTED;
SendAllPackets();
hpupdate_timer.Start();
autosave_timer.Start();
SetDuelTarget(0);
SetDueling(false);
EnteringMessages(this);
LoadZoneFlags();
/* Sets GM Flag if needed & Sends Petition Queue */
UpdateAdmin(false);
if (IsInAGuild()) {
uint8 rank = GuildRank();
if (ClientVersion() >= EQEmu::versions::ClientVersion::RoF)
{
switch (rank) {
case 0: { rank = 5; break; } // GUILD_MEMBER 0
case 1: { rank = 3; break; } // GUILD_OFFICER 1
case 2: { rank = 1; break; } // GUILD_LEADER 2
default: { break; } // GUILD_NONE
}
}
SendAppearancePacket(AT_GuildID, GuildID(), false);
SendAppearancePacket(AT_GuildRank, rank, false);
}
// Moved to the DB load and translator code since we iterate there as well; keep m_pp values whatever they are when they get here
/*const auto sbs = EQEmu::spells::DynamicLookup(ClientVersion(), GetGM())->SpellbookSize;
for (uint32 spellInt = 0; spellInt < sbs; ++spellInt) {
if (m_pp.spell_book[spellInt] < 3 || m_pp.spell_book[spellInt] > EQEmu::spells::SPELL_ID_MAX)
m_pp.spell_book[spellInt] = 0xFFFFFFFF;
}*/
//SendAATable();
if (GetHideMe()) Message(Chat::Red, "[GM] You are currently hidden to all clients");
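// Re-attach to an existing raid, or recreate the raid in this zone from the
// database, then resend the full raid structure and leadership AAs to this client.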
uint32 raidid = database.GetRaidID(GetName());
Raid *raid = nullptr;
if (raidid > 0) {
raid = entity_list.GetRaidByID(raidid);
if (!raid) {
raid = new Raid(raidid);
if (raid->GetID() != 0) {
entity_list.AddRaid(raid, raidid);
raid->LoadLeadership(); // Recreating raid in new zone, get leadership from DB
}
else
raid = nullptr;
}
if (raid) {
SetRaidGrouped(true);
raid->LearnMembers();
raid->VerifyRaid();
raid->GetRaidDetails();
/*
Only the leader should receive this; send it to everyone for now until the
correct creation sequence is worked out. A "no longer leader" packet could
also be sent to non-leaders, but that is not important yet.
*/
raid->SendRaidCreate(this);
raid->SendMakeLeaderPacketTo(raid->leadername, this);
raid->SendRaidAdd(GetName(), this);
raid->SendBulkRaid(this);
raid->SendGroupUpdate(this);
raid->SendRaidMOTD(this);
if (raid->IsLeader(this)) { // We're a raid leader, lets update just in case!
raid->UpdateRaidAAs();
raid->SendAllRaidLeadershipAA();
}
uint32 grpID = raid->GetGroup(GetName());
if (grpID < 12) {
raid->SendRaidGroupRemove(GetName(), grpID);
raid->SendRaidGroupAdd(GetName(), grpID);
raid->CheckGroupMentor(grpID, this);
if (raid->IsGroupLeader(GetName())) { // group leader same thing!
raid->UpdateGroupAAs(raid->GetGroup(this));
raid->GroupUpdate(grpID, false);
}
}
raid->SendGroupLeadershipAA(this, grpID); // this may get sent an extra time ...
SetXTargetAutoMgr(raid->GetXTargetAutoMgr());
if (!GetXTargetAutoMgr()->empty())
SetDirtyAutoHaters();
if (raid->IsLocked())
raid->SendRaidLockTo(this);
raid->SendHPManaEndPacketsTo(this);
}
}
else {
Group *group = nullptr;
group = this->GetGroup();
if (group)
group->SendHPManaEndPacketsTo(this);
}
//bulk raid send in here eventually
//reapply some buffs
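// Walk every active buff slot and re-apply client-visible side effects
// (illusions, mounts, silence, invisibility, levitate, weapon procs) that do
// not carry across zoning on their own.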
uint32 buff_count = GetMaxTotalSlots();
for (uint32 j1 = 0; j1 < buff_count; j1++) {
if (!IsValidSpell(buffs[j1].spellid))
continue;
const SPDat_Spell_Struct &spell = spells[buffs[j1].spellid];
int NimbusEffect = GetNimbusEffect(buffs[j1].spellid);
if (NimbusEffect) {
if (!IsNimbusEffectActive(NimbusEffect))
SendSpellEffect(NimbusEffect, 500, 0, 1, 3000, true);
}
for (int x1 = 0; x1 < EFFECT_COUNT; x1++) {
switch (spell.effectid[x1]) {
case SE_IllusionCopy:
case SE_Illusion: {
if (spell.base[x1] == -1) {
if (gender == 1)
gender = 0;
else if (gender == 0)
gender = 1;
SendIllusionPacket(GetRace(), gender, 0xFF, 0xFF);
}
else if (spell.base[x1] == -2) // base of -2: keep current race/gender and only apply texture/helm, playable races only
{
if (GetRace() == 128 || GetRace() == 130 || GetRace() <= 12)
SendIllusionPacket(GetRace(), GetGender(), spell.base2[x1], spell.max[x1]);
}
else if (spell.max[x1] > 0)
{
SendIllusionPacket(spell.base[x1], 0xFF, spell.base2[x1], spell.max[x1]);
}
else
{
SendIllusionPacket(spell.base[x1], 0xFF, 0xFF, 0xFF);
}
switch (spell.base[x1]) {
case OGRE:
SendAppearancePacket(AT_Size, 9);
break;
case TROLL:
SendAppearancePacket(AT_Size, 8);
break;
case VAHSHIR:
case BARBARIAN:
SendAppearancePacket(AT_Size, 7);
break;
case HALF_ELF:
case WOOD_ELF:
case DARK_ELF:
case FROGLOK:
SendAppearancePacket(AT_Size, 5);
break;
case DWARF:
SendAppearancePacket(AT_Size, 4);
break;
case HALFLING:
case GNOME:
SendAppearancePacket(AT_Size, 3);
break;
default:
SendAppearancePacket(AT_Size, 6);
break;
}
break;
}
case SE_SummonHorse: {
SummonHorse(buffs[j1].spellid);
//hasmount = true; //this was false, is that the correct thing?
break;
}
case SE_Silence:
{
Silence(true);
break;
}
case SE_Amnesia:
{
Amnesia(true);
break;
}
case SE_DivineAura:
{
invulnerable = true;
break;
}
case SE_Invisibility2:
case SE_Invisibility:
{
invisible = true;
SendAppearancePacket(AT_Invis, 1);
break;
}
case SE_Levitate:
{
if (!zone->CanLevitate())
{
if (!GetGM())
{
SendAppearancePacket(AT_Levitate, 0);
BuffFadeByEffect(SE_Levitate);
Message(Chat::Red, "You can't levitate in this zone.");
}
}
else {
SendAppearancePacket(AT_Levitate, 2);
}
break;
}
case SE_InvisVsUndead2:
case SE_InvisVsUndead:
{
invisible_undead = true;
break;
}
case SE_InvisVsAnimals:
{
invisible_animals = true;
break;
}
case SE_AddMeleeProc:
case SE_WeaponProc:
{
AddProcToWeapon(GetProcID(buffs[j1].spellid, x1), false, 100 + spells[buffs[j1].spellid].base2[x1], buffs[j1].spellid, buffs[j1].casterlevel);
break;
}
case SE_DefensiveProc:
{
AddDefensiveProc(GetProcID(buffs[j1].spellid, x1), 100 + spells[buffs[j1].spellid].base2[x1], buffs[j1].spellid);
break;
}
case SE_RangedProc:
{
AddRangedProc(GetProcID(buffs[j1].spellid, x1), 100 + spells[buffs[j1].spellid].base2[x1], buffs[j1].spellid);
break;
}
}
}
}
/* Sends appearances for all mobs not doing anim_stand aka sitting, looting, playing dead */
entity_list.SendZoneAppearance(this);
/* Sends the Nimbus particle effects (up to 3) for any mob using them */
entity_list.SendNimbusEffects(this);
entity_list.SendUntargetable(this);
int x;
for (x = EQEmu::textures::textureBegin; x <= EQEmu::textures::LastTexture; x++) {
SendWearChange(x);
}
// added due to wear change above
UpdateActiveLight();
SendAppearancePacket(AT_Light, GetActiveLightType());
Mob *pet = GetPet();
if (pet != nullptr) {
for (x = EQEmu::textures::textureBegin; x <= EQEmu::textures::LastTexture; x++) {
pet->SendWearChange(x);
}
// added due to wear change above
pet->UpdateActiveLight();
pet->SendAppearancePacket(AT_Light, pet->GetActiveLightType());
}
entity_list.SendTraders(this);
if (GetPet()) {
GetPet()->SendPetBuffsToClient();
}
if (GetGroup())
database.RefreshGroupFromDB(this);
if (RuleB(TaskSystem, EnableTaskSystem))
TaskPeriodic_Timer.Start();
else
TaskPeriodic_Timer.Disable();
conn_state = ClientConnectFinished;
//enforce some rules..
if (!CanBeInZone()) {
LogDebug("[CLIENT] Kicking char from zone, not allowed here");
GoToSafeCoords(database.GetZoneID("arena"), 0);
return;
}
if (zone)
zone->weatherSend(this);
TotalKarma = database.GetKarma(AccountID());
SendDisciplineTimers();
parse->EventPlayer(EVENT_ENTER_ZONE, this, "", 0);
SetLastPositionBeforeBulkUpdate(GetPosition());
/* This sub event is for if a player logs in for the first time since entering world. */
if (firstlogon == 1) {
parse->EventPlayer(EVENT_CONNECT, this, "", 0);
/* QS: PlayerLogConnectDisconnect */
if (RuleB(QueryServ, PlayerLogConnectDisconnect)) {
std::string event_desc = StringFormat("Connect :: Logged into zoneid:%i instid:%i", this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Connect_State, this->CharacterID(), event_desc);
}
/**
* Update last_login now, since it otherwise isn't written until a later save, so online status stays current
*/
database.QueryDatabase(
StringFormat(
"UPDATE `character_data` SET `last_login` = UNIX_TIMESTAMP() WHERE id = %u",
this->CharacterID()
)
);
}
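// If this zone is an instance with a timer, tell the player how long the
// instance has before it expires.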
if (zone) {
if (zone->GetInstanceTimer()) {
uint32 ttime = zone->GetInstanceTimer()->GetRemainingTime();
uint32 day = (ttime / 86400000);
uint32 hour = (ttime / 3600000) % 24;
uint32 minute = (ttime / 60000) % 60;
uint32 second = (ttime / 1000) % 60;
if (day) {
Message(Chat::Yellow, "%s(%u) will expire in %u days, %u hours, %u minutes, and %u seconds.",
zone->GetLongName(), zone->GetInstanceID(), day, hour, minute, second);
}
else if (hour) {
Message(Chat::Yellow, "%s(%u) will expire in %u hours, %u minutes, and %u seconds.",
zone->GetLongName(), zone->GetInstanceID(), hour, minute, second);
}
else if (minute) {
Message(Chat::Yellow, "%s(%u) will expire in %u minutes, and %u seconds.",
zone->GetLongName(), zone->GetInstanceID(), minute, second);
}
else {
Message(Chat::Yellow, "%s(%u) will expire in %u seconds.",
zone->GetLongName(), zone->GetInstanceID(), second);
}
}
}
SendRewards();
SendAltCurrencies();
database.LoadAltCurrencyValues(CharacterID(), alternate_currency);
SendAlternateCurrencyValues();
alternate_currency_loaded = true;
ProcessAlternateCurrencyQueue();
/* This needs to be set, this determines whether or not data was loaded properly before a save */
client_data_loaded = true;
CalcItemScale();
DoItemEnterZone();
if (zone->GetZoneID() == RuleI(World, GuildBankZoneID) && GuildBanks)
GuildBanks->SendGuildBank(this);
if (ClientVersion() >= EQEmu::versions::ClientVersion::SoD)
entity_list.SendFindableNPCList(this);
if (IsInAGuild()) {
SendGuildRanks();
guild_mgr.SendGuildMemberUpdateToWorld(GetName(), GuildID(), zone->GetZoneID(), time(nullptr));
guild_mgr.RequestOnlineGuildMembers(this->CharacterID(), this->GuildID());
}
/** Request adventure info **/
auto pack = new ServerPacket(ServerOP_AdventureDataRequest, 64);
strcpy((char*)pack->pBuffer, GetName());
worldserver.SendPacket(pack);
delete pack;
if (IsClient() && CastToClient()->ClientVersionBit() & EQEmu::versions::maskUFAndLater) {
EQApplicationPacket *outapp = MakeBuffsPacket(false);
CastToClient()->FastQueuePacket(&outapp);
}
// TODO: load these states
// We at least will set them to the correct state for now
if (m_ClientVersionBit & EQEmu::versions::maskUFAndLater && GetPet()) {
SetPetCommandState(PET_BUTTON_SIT, 0);
SetPetCommandState(PET_BUTTON_STOP, 0);
SetPetCommandState(PET_BUTTON_REGROUP, 0);
SetPetCommandState(PET_BUTTON_FOLLOW, 1);
SetPetCommandState(PET_BUTTON_GUARD, 0);
SetPetCommandState(PET_BUTTON_TAUNT, 1);
SetPetCommandState(PET_BUTTON_HOLD, 0);
SetPetCommandState(PET_BUTTON_GHOLD, 0);
SetPetCommandState(PET_BUTTON_FOCUS, 0);
SetPetCommandState(PET_BUTTON_SPELLHOLD, 0);
}
database.LoadAuras(this); // this ends up spawning them so probably safer to load this later (here)
entity_list.RefreshClientXTargets(this);
worldserver.RequestTellQueue(GetName());
entity_list.ScanCloseMobs(close_mobs, this);
}
// connecting opcode handlers
/*
void Client::Handle_Connect_0x3e33(const EQApplicationPacket *app)
{
//OP_0x0380 = 0x642c
EQApplicationPacket* outapp = new EQApplicationPacket(OP_0x0380, sizeof(uint32)); // Dunno
QueuePacket(outapp);
safe_delete(outapp);
return;
}
*/
void Client::Handle_Connect_OP_ApproveZone(const EQApplicationPacket *app)
{
if (app->size != sizeof(ApproveZone_Struct)) {
LogError("Invalid size on OP_ApproveZone: Expected [{}], Got [{}]",
sizeof(ApproveZone_Struct), app->size);
return;
}
ApproveZone_Struct* azone = (ApproveZone_Struct*)app->pBuffer;
azone->approve = 1;
QueuePacket(app);
return;
}
void Client::Handle_Connect_OP_ClientError(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClientError_Struct)) {
LogError("Invalid size on OP_ClientError: Expected [{}], Got [{}]",
sizeof(ClientError_Struct), app->size);
return;
}
// Client reporting error to server
ClientError_Struct* error = (ClientError_Struct*)app->pBuffer;
LogError("Client error: [{}]", error->character_name);
LogError("Error message: [{}]", error->message);
Message(Chat::Red, error->message);
#if (EQDEBUG>=5)
DumpPacket(app);
#endif
return;
}
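// OP_ClientReady ends the connecting handshake: mark the connection state,
// send the zone-in packets if the player has not spawned yet, and finish with
// CompleteConnect().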
void Client::Handle_Connect_OP_ClientReady(const EQApplicationPacket *app)
{
conn_state = ClientReadyReceived;
if (!Spawned())
SendZoneInPackets();
CompleteConnect();
SendHPUpdate();
}
void Client::Handle_Connect_OP_ClientUpdate(const EQApplicationPacket *app)
{
//Once we get this, the client thinks it is connected
//So give it the benefit of the doubt and move to connected
Handle_Connect_OP_ClientReady(app);
}
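// The client asks for the zone contents once it has the new zone data; reply
// with doors, objects and zone points, then the ready markers the client
// expects before it will continue zoning in.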
void Client::Handle_Connect_OP_ReqClientSpawn(const EQApplicationPacket *app)
{
conn_state = ClientSpawnRequested;
auto outapp = new EQApplicationPacket;
// Send Zone Doors
if (entity_list.MakeDoorSpawnPacket(outapp, this))
{
QueuePacket(outapp);
}
safe_delete(outapp);
// Send Zone Objects
entity_list.SendZoneObjects(this);
SendZonePoints();
// Live does this
outapp = new EQApplicationPacket(OP_SendAAStats, 0);
FastQueuePacket(&outapp);
// Tell client they can continue we're done
outapp = new EQApplicationPacket(OP_ZoneServerReady, 0);
FastQueuePacket(&outapp);
outapp = new EQApplicationPacket(OP_SendExpZonein, 0);
FastQueuePacket(&outapp);
if (ClientVersion() >= EQEmu::versions::ClientVersion::RoF)
{
outapp = new EQApplicationPacket(OP_ClientReady, 0);
FastQueuePacket(&outapp);
}
// New for Secrets of Faydwer - Used in Place of OP_SendExpZonein
outapp = new EQApplicationPacket(OP_WorldObjectsSent, 0);
QueuePacket(outapp);
safe_delete(outapp);
if (strncasecmp(zone->GetShortName(), "bazaar", 6) == 0)
SendBazaarWelcome();
conn_state = ZoneContentsSent;
return;
}
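// Reply to the client's new-zone request with the OP_NewZone payload built
// from this zone's newzone_data.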
void Client::Handle_Connect_OP_ReqNewZone(const EQApplicationPacket *app)
{
conn_state = NewZoneRequested;
EQApplicationPacket* outapp = nullptr;
/////////////////////////////////////
// New Zone Packet
outapp = new EQApplicationPacket(OP_NewZone, sizeof(NewZone_Struct));
NewZone_Struct* nz = (NewZone_Struct*)outapp->pBuffer;
memcpy(outapp->pBuffer, &zone->newzone_data, sizeof(NewZone_Struct));
strcpy(nz->char_name, m_pp.name);
// This was previously sent with FastQueuePacket and the packet never went out.
// Possibly a timing issue; the NewZone packet was never logged until it was
// switched to a prioritized QueuePacket.
outapp->priority = 6;
QueuePacket(outapp);
safe_delete(outapp);
return;
}
void Client::Handle_Connect_OP_SendAAStats(const EQApplicationPacket *app)
{
SendAlternateAdvancementTimers();
auto outapp = new EQApplicationPacket(OP_SendAAStats, 0);
QueuePacket(outapp);
safe_delete(outapp);
return;
}
void Client::Handle_Connect_OP_SendAATable(const EQApplicationPacket *app)
{
SendAlternateAdvancementTable();
return;
}
void Client::Handle_Connect_OP_SendExpZonein(const EQApplicationPacket *app)
{
auto outapp = new EQApplicationPacket(OP_SendExpZonein, 0);
QueuePacket(outapp);
safe_delete(outapp);
// SoF+ Gets Zone-In packets after sending OP_WorldObjectsSent
if (ClientVersion() < EQEmu::versions::ClientVersion::SoF)
{
SendZoneInPackets();
}
return;
}
void Client::Handle_Connect_OP_SendGuildTributes(const EQApplicationPacket *app)
{
SendGuildTributes();
return;
}
void Client::Handle_Connect_OP_SendTributes(const EQApplicationPacket *app)
{
SendTributes();
return;
}
void Client::Handle_Connect_OP_SetServerFilter(const EQApplicationPacket *app)
{
if (app->size != sizeof(SetServerFilter_Struct)) {
LogError("Received invalid sized OP_SetServerFilter");
DumpPacket(app);
return;
}
SetServerFilter_Struct* filter = (SetServerFilter_Struct*)app->pBuffer;
ServerFilter(filter);
return;
}
void Client::Handle_Connect_OP_SpawnAppearance(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_Connect_OP_TGB(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
LogError("Invalid size on OP_TGB: Expected [{}], Got [{}]",
sizeof(uint32), app->size);
return;
}
OPTGB(app);
return;
}
void Client::Handle_Connect_OP_UpdateAA(const EQApplicationPacket *app)
{
SendAlternateAdvancementPoints();
}
void Client::Handle_Connect_OP_WearChange(const EQApplicationPacket *app)
{
//not sure what these are supposed to mean to us.
return;
}
void Client::Handle_Connect_OP_WorldObjectsSent(const EQApplicationPacket *app)
{
// New for SoF+
auto outapp = new EQApplicationPacket(OP_WorldObjectsSent, 0);
QueuePacket(outapp);
safe_delete(outapp);
// Packet order changed for SoF+, so below is sent here instead of OP_SendExpLogin
SendZoneInPackets();
if (RuleB(Mercs, AllowMercs))
{
SpawnMercOnZone();
}
return;
}
void Client::Handle_Connect_OP_ZoneComplete(const EQApplicationPacket *app)
{
auto outapp = new EQApplicationPacket(OP_0x0347, 0);
QueuePacket(outapp);
safe_delete(outapp);
return;
}
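// OP_ZoneEntry is the first meaningful packet of the zoning handshake. The
// handler authenticates the character against the zone auth list, loads the
// player profile, inventory, buffs and pet from the database, then streams the
// player profile, zone spawns, corpses, time of day and weather back to the
// client.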
void Client::Handle_Connect_OP_ZoneEntry(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClientZoneEntry_Struct))
return;
ClientZoneEntry_Struct *cze = (ClientZoneEntry_Struct *)app->pBuffer;
if (strlen(cze->char_name) > 63)
return;
conn_state = ReceivedZoneEntry;
SetClientVersion(Connection()->ClientVersion());
m_ClientVersionBit = EQEmu::versions::ConvertClientVersionToClientVersionBit(Connection()->ClientVersion());
m_pp.SetPlayerProfileVersion(m_ClientVersion);
m_inv.SetInventoryVersion(m_ClientVersion);
/* Anti-ghost code
The temporary variable is used so the search doesn't find this object.
*/
Client* client = entity_list.GetClientByName(cze->char_name);
if (!zone->GetAuth(ip, cze->char_name, &WID, &account_id, &character_id, &admin, lskey, &tellsoff)) {
LogClientLogin("[{}] failed zone auth check", cze->char_name);
if (nullptr != client) {
client->Save();
client->Kick("Failed auth check");
}
return;
}
strcpy(name, cze->char_name);
/* Check for Client Spoofing */
if (client != 0) {
struct in_addr ghost_addr;
ghost_addr.s_addr = eqs->GetRemoteIP();
LogError("Ghosting client: Account ID:[{}] Name:[{}] Character:[{}] IP:[{}]",
client->AccountID(), client->AccountName(), client->GetName(), inet_ntoa(ghost_addr));
client->Save();
client->Disconnect();
}
EQApplicationPacket* outapp = nullptr;
bool loaditems = false;
std::string query;
uint32 cid = CharacterID();
character_id = cid; /* Global character_id reference */
/* Flush and reload factions */
database.RemoveTempFactions(this);
database.LoadCharacterFactionValues(cid, factionvalues);
/* Load Character Account Data: Temp until I move */
query = StringFormat("SELECT `status`, `name`, `ls_id`, `lsaccount_id`, `gmspeed`, `revoked`, `hideme`, `time_creation` FROM `account` WHERE `id` = %u", this->AccountID());
auto results = database.QueryDatabase(query);
for (auto row = results.begin(); row != results.end(); ++row) {
admin = atoi(row[0]);
strn0cpy(account_name, row[1], sizeof(account_name));
strn0cpy(loginserver, row[2], sizeof(loginserver));
lsaccountid = atoi(row[3]);
gmspeed = atoi(row[4]);
revoked = atoi(row[5]);
gm_hide_me = atoi(row[6]);
account_creation = atoul(row[7]);
}
/* Load Character Data */
query = StringFormat("SELECT `lfp`, `lfg`, `xtargets`, `firstlogon`, `guild_id`, `rank` FROM `character_data` LEFT JOIN `guild_members` ON `id` = `char_id` WHERE `id` = %i", cid);
results = database.QueryDatabase(query);
for (auto row = results.begin(); row != results.end(); ++row) {
if (row[4] && atoi(row[4]) > 0) {
guild_id = atoi(row[4]);
if (row[5] != nullptr) { guildrank = atoi(row[5]); }
else { guildrank = GUILD_RANK_NONE; }
}
if (LFP) { LFP = atoi(row[0]); }
if (LFG) { LFG = atoi(row[1]); }
if (row[3])
firstlogon = atoi(row[3]);
}
if (RuleB(Character, SharedBankPlat))
m_pp.platinum_shared = database.GetSharedPlatinum(this->AccountID());
database.ClearOldRecastTimestamps(cid); /* Clear out our old recast timestamps to keep the DB clean */
// Set to full support in case they're a GM with items in disabled expansion slots but their GM flag is off.
// Item loss would occur when they use the 'empty' slots if this is not done.
m_inv.SetGMInventory(true);
loaditems = database.GetInventory(cid, &m_inv); /* Load Character Inventory */
database.LoadCharacterBandolier(cid, &m_pp); /* Load Character Bandolier */
database.LoadCharacterBindPoint(cid, &m_pp); /* Load Character Bind */
database.LoadCharacterMaterialColor(cid, &m_pp); /* Load Character Material */
database.LoadCharacterPotions(cid, &m_pp); /* Load Character Potion Belt */
database.LoadCharacterCurrency(cid, &m_pp); /* Load Character Currency into PP */
database.LoadCharacterData(cid, &m_pp, &m_epp); /* Load Character Data from DB into PP as well as E_PP */
database.LoadCharacterSkills(cid, &m_pp); /* Load Character Skills */
database.LoadCharacterInspectMessage(cid, &m_inspect_message); /* Load Character Inspect Message */
database.LoadCharacterSpellBook(cid, &m_pp); /* Load Character Spell Book */
database.LoadCharacterMemmedSpells(cid, &m_pp); /* Load Character Memorized Spells */
database.LoadCharacterDisciplines(cid, &m_pp); /* Load Character Disciplines */
database.LoadCharacterLanguages(cid, &m_pp); /* Load Character Languages */
database.LoadCharacterLeadershipAA(cid, &m_pp); /* Load Character Leadership AA's */
database.LoadCharacterTribute(cid, &m_pp); /* Load CharacterTribute */
/* Load AdventureStats */
AdventureStats_Struct as;
if (database.GetAdventureStats(cid, &as))
{
m_pp.ldon_wins_guk = as.success.guk;
m_pp.ldon_wins_mir = as.success.mir;
m_pp.ldon_wins_mmc = as.success.mmc;
m_pp.ldon_wins_ruj = as.success.ruj;
m_pp.ldon_wins_tak = as.success.tak;
m_pp.ldon_losses_guk = as.failure.guk;
m_pp.ldon_losses_mir = as.failure.mir;
m_pp.ldon_losses_mmc = as.failure.mmc;
m_pp.ldon_losses_ruj = as.failure.ruj;
m_pp.ldon_losses_tak = as.failure.tak;
}
/* Set item material tint */
for (int i = EQEmu::textures::textureBegin; i <= EQEmu::textures::LastTexture; i++)
{
if (m_pp.item_tint.Slot[i].UseTint == 1 || m_pp.item_tint.Slot[i].UseTint == 255)
{
m_pp.item_tint.Slot[i].UseTint = 0xFF;
}
}
if (level) { level = m_pp.level; }
/* If GM, not trackable */
if (gm_hide_me) { trackable = false; }
/* Set Con State for Reporting */
conn_state = PlayerProfileLoaded;
m_pp.zone_id = zone->GetZoneID();
m_pp.zoneInstance = zone->GetInstanceID();
/* Set Total Seconds Played */
TotalSecondsPlayed = m_pp.timePlayedMin * 60;
/* If we can maintain intoxication across zones, check for it */
if (!RuleB(Character, MaintainIntoxicationAcrossZones))
m_pp.intoxication = 0;
strcpy(name, m_pp.name);
strcpy(lastname, m_pp.last_name);
/* If PP is set to weird coordinates */
if ((m_pp.x == -1 && m_pp.y == -1 && m_pp.z == -1) || (m_pp.x == -2 && m_pp.y == -2 && m_pp.z == -2)) {
auto safePoint = zone->GetSafePoint();
m_pp.x = safePoint.x;
m_pp.y = safePoint.y;
m_pp.z = safePoint.z;
}
/* If too far below ground, then fix */
// float ground_z = GetGroundZ(m_pp.x, m_pp.y, m_pp.z);
// if (m_pp.z < (ground_z - 500))
// m_pp.z = ground_z;
/* Set Mob variables for spawn */
class_ = m_pp.class_;
level = m_pp.level;
m_Position.x = m_pp.x;
m_Position.y = m_pp.y;
m_Position.z = m_pp.z;
m_Position.w = m_pp.heading;
race = m_pp.race;
base_race = m_pp.race;
gender = m_pp.gender;
base_gender = m_pp.gender;
deity = m_pp.deity;
haircolor = m_pp.haircolor;
beardcolor = m_pp.beardcolor;
eyecolor1 = m_pp.eyecolor1;
eyecolor2 = m_pp.eyecolor2;
hairstyle = m_pp.hairstyle;
luclinface = m_pp.face;
beard = m_pp.beard;
drakkin_heritage = m_pp.drakkin_heritage;
drakkin_tattoo = m_pp.drakkin_tattoo;
drakkin_details = m_pp.drakkin_details;
// Max Level for Character:PerCharacterQglobalMaxLevel and Character:PerCharacterBucketMaxLevel
int client_max_level = 0;
if (RuleB(Character, PerCharacterQglobalMaxLevel)) {
client_max_level = GetCharMaxLevelFromQGlobal();
} else if (RuleB(Character, PerCharacterBucketMaxLevel)) {
client_max_level = GetCharMaxLevelFromBucket();
}
SetClientMaxLevel(client_max_level);
// we know our class now, so we might have to fix our consume timer!
if (class_ == MONK)
consume_food_timer.SetTimer(CONSUMPTION_MNK_TIMER);
InitInnates();
/* If GM not set in DB, and does not meet min status to be GM, reset */
if (m_pp.gm && admin < minStatusToBeGM)
m_pp.gm = 0;
/* Load Guild */
if (!IsInAGuild()) {
m_pp.guild_id = GUILD_NONE;
}
else {
m_pp.guild_id = GuildID();
uint8 rank = guild_mgr.GetDisplayedRank(GuildID(), GuildRank(), CharacterID());
// FIXME: RoF guild rank
if (ClientVersion() >= EQEmu::versions::ClientVersion::RoF) {
switch (rank) {
case 0:
rank = 5;
break;
case 1:
rank = 3;
break;
case 2:
rank = 1;
break;
default:
break;
}
}
m_pp.guildrank = rank;
if (zone->GetZoneID() == RuleI(World, GuildBankZoneID))
GuildBanker = (guild_mgr.IsGuildLeader(GuildID(), CharacterID()) || guild_mgr.GetBankerFlag(CharacterID()));
}
m_pp.guildbanker = GuildBanker;
switch (race)
{
case OGRE:
size = 9; break;
case TROLL:
size = 8; break;
case VAHSHIR: case BARBARIAN:
size = 7; break;
case HUMAN: case HIGH_ELF: case ERUDITE: case IKSAR: case DRAKKIN:
size = 6; break;
case HALF_ELF:
size = 5.5; break;
case WOOD_ELF: case DARK_ELF: case FROGLOK:
size = 5; break;
case DWARF:
size = 4; break;
case HALFLING:
size = 3.5; break;
case GNOME:
size = 3; break;
default:
size = 0;
}
/* Check for Invalid points */
if (m_pp.ldon_points_guk < 0 || m_pp.ldon_points_guk > 2000000000) { m_pp.ldon_points_guk = 0; }
if (m_pp.ldon_points_mir < 0 || m_pp.ldon_points_mir > 2000000000) { m_pp.ldon_points_mir = 0; }
if (m_pp.ldon_points_mmc < 0 || m_pp.ldon_points_mmc > 2000000000) { m_pp.ldon_points_mmc = 0; }
if (m_pp.ldon_points_ruj < 0 || m_pp.ldon_points_ruj > 2000000000) { m_pp.ldon_points_ruj = 0; }
if (m_pp.ldon_points_tak < 0 || m_pp.ldon_points_tak > 2000000000) { m_pp.ldon_points_tak = 0; }
if (m_pp.ldon_points_available < 0 || m_pp.ldon_points_available > 2000000000) { m_pp.ldon_points_available = 0; }
if (RuleB(World, UseClientBasedExpansionSettings)) {
m_pp.expansions = EQEmu::expansions::ConvertClientVersionToExpansionsMask(ClientVersion());
}
else {
m_pp.expansions = (RuleI(World, ExpansionSettings) & EQEmu::expansions::ConvertClientVersionToExpansionsMask(ClientVersion()));
}
if (!database.LoadAlternateAdvancement(this)) {
LogError("Error loading AA points for [{}]", GetName());
}
if (SPDAT_RECORDS > 0) {
for (uint32 z = 0; z < EQEmu::spells::SPELL_GEM_COUNT; z++) {
if (m_pp.mem_spells[z] >= (uint32)SPDAT_RECORDS)
UnmemSpell(z, false);
}
database.LoadBuffs(this);
uint32 max_slots = GetMaxBuffSlots();
for (int i = 0; i < BUFF_COUNT; i++) {
if (buffs[i].spellid != SPELL_UNKNOWN) {
m_pp.buffs[i].spellid = buffs[i].spellid;
m_pp.buffs[i].bard_modifier = buffs[i].instrument_mod;
m_pp.buffs[i].effect_type = 2;
m_pp.buffs[i].player_id = 0x2211;
m_pp.buffs[i].level = buffs[i].casterlevel;
m_pp.buffs[i].unknown003 = 0;
m_pp.buffs[i].duration = buffs[i].ticsremaining;
m_pp.buffs[i].counters = buffs[i].counters;
m_pp.buffs[i].num_hits = buffs[i].numhits;
}
else {
m_pp.buffs[i].spellid = SPELLBOOK_UNKNOWN;
m_pp.buffs[i].bard_modifier = 10;
m_pp.buffs[i].effect_type = 0;
m_pp.buffs[i].player_id = 0;
m_pp.buffs[i].level = 0;
m_pp.buffs[i].unknown003 = 0;
m_pp.buffs[i].duration = 0;
m_pp.buffs[i].counters = 0;
m_pp.buffs[i].num_hits = 0;
}
}
}
/* Load Character Key Ring */
KeyRingLoad();
/* Send Group Members via PP */
uint32 groupid = database.GetGroupID(GetName());
Group* group = nullptr;
if (groupid > 0) {
group = entity_list.GetGroupByID(groupid);
if (!group) { //nobody from our group is here... start a new group
group = new Group(groupid);
if (group->GetID() != 0)
entity_list.AddGroup(group, groupid);
else //error loading group members...
{
delete group;
group = nullptr;
}
} //else, somebody from our group is already here...
if (!group)
database.SetGroupID(GetName(), 0, CharacterID(), false); //cannot re-establish group, kill it
}
else { //no group id
//clear out the group junk in our PP
uint32 xy = 0;
for (xy = 0; xy < MAX_GROUP_MEMBERS; xy++)
memset(m_pp.groupMembers[xy], 0, 64);
}
if (group) {
// If the group leader is not set, pull the group leader information from the database.
if (!group->GetLeader()) {
char ln[64];
char MainTankName[64];
char AssistName[64];
char PullerName[64];
char NPCMarkerName[64];
char mentoree_name[64];
int mentor_percent;
GroupLeadershipAA_Struct GLAA;
memset(ln, 0, 64);
strcpy(ln, database.GetGroupLeadershipInfo(group->GetID(), ln, MainTankName, AssistName, PullerName, NPCMarkerName, mentoree_name, &mentor_percent, &GLAA));
Client *c = entity_list.GetClientByName(ln);
if (c)
group->SetLeader(c);
group->SetMainTank(MainTankName);
group->SetMainAssist(AssistName);
group->SetPuller(PullerName);
group->SetNPCMarker(NPCMarkerName);
group->SetGroupAAs(&GLAA);
group->SetGroupMentor(mentor_percent, mentoree_name);
//group->NotifyMainTank(this, 1);
//group->NotifyMainAssist(this, 1);
//group->NotifyPuller(this, 1);
// If we are the leader, force an update of our group AAs to other members in the zone, in case
// we purchased a new one while out-of-zone.
if (group->IsLeader(this))
group->SendLeadershipAAUpdate();
}
JoinGroupXTargets(group);
group->UpdatePlayer(this);
LFG = false;
}
#ifdef BOTS
database.botdb.LoadOwnerOptions(this);
// TODO: mod below function for loading spawned botgroups
Bot::LoadAndSpawnAllZonedBots(this);
#endif
m_inv.SetGMInventory((bool)m_pp.gm); // set to current gm state for calc
CalcBonuses();
if (RuleB(Zone, EnableLoggedOffReplenishments) &&
time(nullptr) - m_pp.lastlogin >= RuleI(Zone, MinOfflineTimeToReplenishments)) {
m_pp.cur_hp = GetMaxHP();
m_pp.mana = GetMaxMana();
m_pp.endurance = GetMaxEndurance();
}
if (m_pp.cur_hp <= 0)
m_pp.cur_hp = GetMaxHP();
SetHP(m_pp.cur_hp);
Mob::SetMana(m_pp.mana); // mob function doesn't send the packet
SetEndurance(m_pp.endurance);
/* Update LFP in case any (or all) of our group disbanded while we were zoning. */
if (IsLFP()) { UpdateLFP(); }
p_timers.SetCharID(CharacterID());
if (!p_timers.Load(&database)) {
LogError("Unable to load ability timers from the database for [{}] ([{}])!", GetCleanName(), CharacterID());
}
/* Load Spell Slot Refresh from Currently Memoried Spells */
for (unsigned int i = 0; i < EQEmu::spells::SPELL_GEM_COUNT; ++i)
if (IsValidSpell(m_pp.mem_spells[i]))
m_pp.spellSlotRefresh[i] = p_timers.GetRemainingTime(pTimerSpellStart + m_pp.mem_spells[i]) * 1000;
/* Ability slot refresh send SK/PAL */
if (m_pp.class_ == SHADOWKNIGHT || m_pp.class_ == PALADIN) {
uint32 abilitynum = 0;
if (m_pp.class_ == SHADOWKNIGHT) { abilitynum = pTimerHarmTouch; }
else { abilitynum = pTimerLayHands; }
uint32 remaining = p_timers.GetRemainingTime(abilitynum);
if (remaining > 0 && remaining < 15300)
m_pp.abilitySlotRefresh = remaining * 1000;
else
m_pp.abilitySlotRefresh = 0;
}
#ifdef _EQDEBUG
printf("Dumping inventory on load:\n");
m_inv.dumpEntireInventory();
#endif
/* Reset to max so they don't drown on zone-in if the zone is underwater */
m_pp.air_remaining = 60;
/* Check for PVP Zone status*/
if (zone->IsPVPZone())
m_pp.pvp = 1;
/* Time entitled on Account: Move to account */
m_pp.timeentitledonaccount = database.GetTotalTimeEntitledOnAccount(AccountID()) / 1440;
/* Reset rest timer if the durations have been lowered in the database */
if ((m_pp.RestTimer > RuleI(Character, RestRegenTimeToActivate)) && (m_pp.RestTimer > RuleI(Character, RestRegenRaidTimeToActivate)))
m_pp.RestTimer = 0;
/* This checksum should disappear once dynamic structs are in... each struct strategy will do it */ // looks to be in place now
//CRC32::SetEQChecksum((unsigned char*)&m_pp, sizeof(PlayerProfile_Struct) - sizeof(m_pp.m_player_profile_version) - 4);
// m_pp.checksum = 0; // All server out-bound player profile packets are now translated - no need to waste cycles calculating this...
outapp = new EQApplicationPacket(OP_PlayerProfile, sizeof(PlayerProfile_Struct));
/* The entityid field in the Player Profile is used by the Client in relation to Group Leadership AA */
m_pp.entityid = GetID();
memcpy(outapp->pBuffer, &m_pp, outapp->size);
outapp->priority = 6;
FastQueuePacket(&outapp);
if (m_pp.RestTimer)
rest_timer.Start(m_pp.RestTimer * 1000);
/* Load Pet */
database.LoadPetInfo(this);
if (m_petinfo.SpellID > 1 && !GetPet() && m_petinfo.SpellID <= SPDAT_RECORDS) {
MakePoweredPet(m_petinfo.SpellID, spells[m_petinfo.SpellID].teleport_zone, m_petinfo.petpower, m_petinfo.Name, m_petinfo.size);
if (GetPet() && GetPet()->IsNPC()) {
NPC *pet = GetPet()->CastToNPC();
pet->SetPetState(m_petinfo.Buffs, m_petinfo.Items);
pet->CalcBonuses();
pet->SetHP(m_petinfo.HP);
pet->SetMana(m_petinfo.Mana);
}
m_petinfo.SpellID = 0;
}
/* Moved here so it's after where we load the pet data. */
if (!GetAA(aaPersistentMinion))
memset(&m_suspendedminion, 0, sizeof(PetInfo));
/* Server Zone Entry Packet */
outapp = new EQApplicationPacket(OP_ZoneEntry, sizeof(ServerZoneEntry_Struct));
ServerZoneEntry_Struct* sze = (ServerZoneEntry_Struct*)outapp->pBuffer;
FillSpawnStruct(&sze->player, CastToMob());
sze->player.spawn.curHp = 1;
sze->player.spawn.NPC = 0;
sze->player.spawn.z += 6; //arbitrary lift, seems to help spawning under zone.
outapp->priority = 6;
FastQueuePacket(&outapp);
/* Zone Spawns Packet */
entity_list.SendZoneSpawnsBulk(this);
entity_list.SendZoneCorpsesBulk(this);
entity_list.SendZonePVPUpdates(this); //hack until spawn struct is fixed.
/* Time of Day packet */
outapp = new EQApplicationPacket(OP_TimeOfDay, sizeof(TimeOfDay_Struct));
TimeOfDay_Struct* tod = (TimeOfDay_Struct*)outapp->pBuffer;
zone->zone_time.GetCurrentEQTimeOfDay(time(0), tod);
outapp->priority = 6;
FastQueuePacket(&outapp);
/* Tribute Packets */
DoTributeUpdate();
if (m_pp.tribute_active) {
//restart the tribute timer where we left off
tribute_timer.Start(m_pp.tribute_time_remaining);
}
/*
Character Inventory Packet
this is not quite where live sends inventory, they do it after tribute
*/
if (loaditems) { /* Don't load if a length error occurs */
if (admin >= minStatusToBeGM)
m_inv.SetGMInventory(true); // set to true to allow expansion-restricted packets through
BulkSendInventoryItems();
/* Send stuff on the cursor which isn't sent in bulk */
for (auto iter = m_inv.cursor_cbegin(); iter != m_inv.cursor_cend(); ++iter) {
/* First item cursor is sent in bulk inventory packet */
if (iter == m_inv.cursor_cbegin())
continue;
const EQEmu::ItemInstance *inst = *iter;
SendItemPacket(EQEmu::invslot::slotCursor, inst, ItemPacketLimbo);
}
// This is somewhat hackish at the moment; this process needs to be realigned to allow a contiguous flow
m_inv.SetGMInventory((bool)m_pp.gm); // reset back to current gm state
}
/* Task Packets */
LoadClientTaskState();
/**
* DevTools Load Settings
*/
if (Admin() >= EQEmu::DevTools::GM_ACCOUNT_STATUS_LEVEL) {
std::string dev_tools_window_key = StringFormat("%i-dev-tools-window-disabled", AccountID());
if (DataBucket::GetData(dev_tools_window_key) == "true") {
dev_tools_window_enabled = false;
}
}
if (m_ClientVersionBit & EQEmu::versions::maskUFAndLater) {
outapp = new EQApplicationPacket(OP_XTargetResponse, 8);
outapp->WriteUInt32(GetMaxXTargets());
outapp->WriteUInt32(0);
FastQueuePacket(&outapp);
}
/*
Weather Packet
This shouldn't be moved; it seems to be what the client
uses to advance to the next state (sending ReqNewZone)
*/
outapp = new EQApplicationPacket(OP_Weather, 12);
Weather_Struct *ws = (Weather_Struct *)outapp->pBuffer;
ws->val1 = 0x000000FF;
if (zone->zone_weather == 1) { ws->type = 0x31; } // Rain
if (zone->zone_weather == 2) {
outapp->pBuffer[8] = 0x01;
ws->type = 0x02;
}
outapp->priority = 6;
QueuePacket(outapp);
safe_delete(outapp);
if (ClientVersion() >= EQEmu::versions::ClientVersion::RoF) {
Handle_Connect_OP_ReqNewZone(nullptr);
}
SetAttackTimer();
conn_state = ZoneInfoSent;
zoneinpacket_timer.Start();
return;
}
// connected opcode handlers
void Client::Handle_0x0193(const EQApplicationPacket *app)
{
// Not sure what this opcode does. It started being sent when OP_ClientUpdate was
// changed to pump OP_ClientUpdate back out instead of OP_MobUpdate
// 2 bytes: 00 00
return;
}
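// OP_AAAction covers AA hotkey activation, AA rank purchases, and toggling or
// setting the percentage of experience diverted to AAs.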
void Client::Handle_OP_AAAction(const EQApplicationPacket *app)
{
LogAA("Received OP_AAAction");
if (app->size != sizeof(AA_Action)) {
LogAA("Error! OP_AAAction size didnt match!");
return;
}
AA_Action* action = (AA_Action*)app->pBuffer;
if (action->action == aaActionActivate) {//AA Hotkey
LogAA("Activating AA [{}]", action->ability);
ActivateAlternateAdvancementAbility(action->ability, action->target_id);
}
else if (action->action == aaActionBuy) {
PurchaseAlternateAdvancementRank(action->ability);
}
else if (action->action == aaActionDisableEXP) { //Turn Off AA Exp
if (m_epp.perAA > 0)
MessageString(Chat::White, AA_OFF);
m_epp.perAA = 0;
SendAlternateAdvancementStats();
}
else if (action->action == aaActionSetEXP) {
if (m_epp.perAA == 0)
MessageString(Chat::White, AA_ON);
m_epp.perAA = action->exp_value;
if (m_epp.perAA < 0 || m_epp.perAA > 100)
m_epp.perAA = 0; // stop exploit with sanity check
// send an update
SendAlternateAdvancementStats();
SendAlternateAdvancementTable();
}
else {
LogAA("Unknown AA action : [{}] [{}] [{}] [{}]", action->action, action->ability, action->target_id, action->exp_value);
}
}
void Client::Handle_OP_AcceptNewTask(const EQApplicationPacket *app)
{
if (app->size != sizeof(AcceptNewTask_Struct)) {
LogDebug("Size mismatch in OP_AcceptNewTask expected [{}] got [{}]", sizeof(AcceptNewTask_Struct), app->size);
DumpPacket(app);
return;
}
AcceptNewTask_Struct *ant = (AcceptNewTask_Struct*)app->pBuffer;
if (ant->task_id > 0 && RuleB(TaskSystem, EnableTaskSystem) && taskstate)
taskstate->AcceptNewTask(this, ant->task_id, ant->task_master_id);
}
void Client::Handle_OP_AdventureInfoRequest(const EQApplicationPacket *app)
{
if (app->size < sizeof(EntityId_Struct))
{
LogError("Handle_OP_AdventureInfoRequest had a packet that was too small");
return;
}
EntityId_Struct* ent = (EntityId_Struct*)app->pBuffer;
Mob * m = entity_list.GetMob(ent->entity_id);
if (m && m->IsNPC())
{
std::map<uint32, std::string>::iterator it;
it = zone->adventure_entry_list_flavor.find(m->CastToNPC()->GetAdventureTemplate());
if (it != zone->adventure_entry_list_flavor.end())
{
auto outapp = new EQApplicationPacket(OP_AdventureInfo, (it->second.size() + 2));
strn0cpy((char*)outapp->pBuffer, it->second.c_str(), it->second.size());
FastQueuePacket(&outapp);
}
else
{
if (m->CastToNPC()->GetAdventureTemplate() != 0)
{
std::string text = "Choose your difficulty and preferred adventure type.";
auto outapp = new EQApplicationPacket(OP_AdventureInfo, (text.size() + 2));
strn0cpy((char*)outapp->pBuffer, text.c_str(), text.size());
FastQueuePacket(&outapp);
}
}
}
}
void Client::Handle_OP_AdventureLeaderboardRequest(const EQApplicationPacket *app)
{
if (app->size < sizeof(AdventureLeaderboardRequest_Struct))
{
return;
}
if (adventure_leaderboard_timer)
{
return;
}
adventure_leaderboard_timer = new Timer(4000);
auto pack = new ServerPacket(ServerOP_AdventureLeaderboard, sizeof(ServerLeaderboardRequest_Struct));
ServerLeaderboardRequest_Struct *lr = (ServerLeaderboardRequest_Struct*)pack->pBuffer;
strcpy(lr->player, GetName());
AdventureLeaderboardRequest_Struct *lrs = (AdventureLeaderboardRequest_Struct*)app->pBuffer;
lr->type = 1 + (lrs->theme * 2) + lrs->type;
worldserver.SendPacket(pack);
delete pack;
}
void Client::Handle_OP_AdventureMerchantPurchase(const EQApplicationPacket *app)
{
if (app->size != sizeof(Adventure_Purchase_Struct))
{
LogError("OP size error: OP_AdventureMerchantPurchase expected:[{}] got:[{}]", sizeof(Adventure_Purchase_Struct), app->size);
return;
}
Adventure_Purchase_Struct* aps = (Adventure_Purchase_Struct*)app->pBuffer;
/*
Get item aps->itemid (can check NPC if that's necessary), ldon point theme check only if theme is not 0 (I am not sure what 1-5 are though for themes)
if(ldon_points_available >= item ldonpointcost)
{
give item (67 00 00 00 for the packettype using opcode 0x02c5)
ldon_points_available -= ldonpointcost;
}
*/
uint32 merchantid = 0;
Mob* tmp = entity_list.GetMob(aps->npcid);
if (tmp == 0 || !tmp->IsNPC() || ((tmp->GetClass() != ADVENTUREMERCHANT) &&
(tmp->GetClass() != DISCORD_MERCHANT) && (tmp->GetClass() != NORRATHS_KEEPERS_MERCHANT) && (tmp->GetClass() != DARK_REIGN_MERCHANT)))
return;
//you have to be somewhat close to them to be properly using them
if (DistanceSquared(m_Position, tmp->GetPosition()) > USE_NPC_RANGE2)
return;
merchantid = tmp->CastToNPC()->MerchantType;
const EQEmu::ItemData* item = nullptr;
bool found = false;
std::list<MerchantList> merlist = zone->merchanttable[merchantid];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tmp->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (!item)
continue;
if (item->ID == aps->itemid) { // This check makes sure the item is actually on the NPC; people attempt to inject packets to get items summoned...
found = true;
break;
}
}
if (!item || !found) {
Message(Chat::Red, "Error: The item you purchased does not exist!");
return;
}
if (aps->Type == LDoNMerchant)
{
if (m_pp.ldon_points_available < int32(item->LDoNPrice)) {
Message(Chat::Red, "You cannot afford that item.");
return;
}
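// LDoNTheme is a bitmask over the adventure themes; the bits checked below map to the
// per-theme point pools (1 = guk, 2 = mir, 4 = mmc, 8 = ruj, 16 = tak). A themed item
// additionally requires enough points earned in that specific theme.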
if (item->LDoNTheme <= 16)
{
if (item->LDoNTheme & 16)
{
if (m_pp.ldon_points_tak < int32(item->LDoNPrice))
{
Message(Chat::Red, "You need at least %u points in tak to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (item->LDoNTheme & 8)
{
if (m_pp.ldon_points_ruj < int32(item->LDoNPrice))
{
Message(Chat::Red, "You need at least %u points in ruj to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (item->LDoNTheme & 4)
{
if (m_pp.ldon_points_mmc < int32(item->LDoNPrice))
{
Message(Chat::Red, "You need at least %u points in mmc to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (item->LDoNTheme & 2)
{
if (m_pp.ldon_points_mir < int32(item->LDoNPrice))
{
Message(Chat::Red, "You need at least %u points in mir to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (item->LDoNTheme & 1)
{
if (m_pp.ldon_points_guk < int32(item->LDoNPrice))
{
Message(Chat::Red, "You need at least %u points in guk to purchase this item.", int32(item->LDoNPrice));
return;
}
}
}
}
else if (aps->Type == DiscordMerchant)
{
if (GetPVPPoints() < item->LDoNPrice)
{
Message(Chat::Red, "You need at least %u PVP points to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (aps->Type == NorrathsKeepersMerchant)
{
if (GetRadiantCrystals() < item->LDoNPrice)
{
Message(Chat::Red, "You need at least %u Radiant Crystals to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else if (aps->Type == DarkReignMerchant)
{
if (GetEbonCrystals() < item->LDoNPrice)
{
Message(Chat::Red, "You need at least %u Ebon Crystals to purchase this item.", int32(item->LDoNPrice));
return;
}
}
else
{
Message(Chat::Red, "Unknown Adventure Merchant type.");
return;
}
if (CheckLoreConflict(item))
{
Message(Chat::Yellow, "You can only have one of a lore item.");
return;
}
if (aps->Type == LDoNMerchant)
{
int32 requiredpts = (int32)item->LDoNPrice*-1;
if (!UpdateLDoNPoints(requiredpts, 6))
return;
}
else if (aps->Type == DiscordMerchant)
{
SetPVPPoints(GetPVPPoints() - (int32)item->LDoNPrice);
SendPVPStats();
}
else if (aps->Type == NorrathsKeepersMerchant)
{
SetRadiantCrystals(GetRadiantCrystals() - (int32)item->LDoNPrice);
SendCrystalCounts();
}
else if (aps->Type == DarkReignMerchant)
{
SetEbonCrystals(GetEbonCrystals() - (int32)item->LDoNPrice);
SendCrystalCounts();
}
int16 charges = 1;
if (item->MaxCharges != 0)
charges = item->MaxCharges;
EQEmu::ItemInstance *inst = database.CreateItem(item, charges);
if (!AutoPutLootInInventory(*inst, true, true))
{
PutLootInInventory(EQEmu::invslot::slotCursor, *inst);
}
Save(1);
}
void Client::Handle_OP_AdventureMerchantRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(AdventureMerchant_Struct))
{
LogError("OP size error: OP_AdventureMerchantRequest expected:[{}] got:[{}]", sizeof(AdventureMerchant_Struct), app->size);
return;
}
std::stringstream ss(std::stringstream::in | std::stringstream::out);
uint8 count = 0;
AdventureMerchant_Struct* eid = (AdventureMerchant_Struct*)app->pBuffer;
uint32 merchantid = 0;
Mob* tmp = entity_list.GetMob(eid->entity_id);
if (tmp == 0 || !tmp->IsNPC() || ((tmp->GetClass() != ADVENTUREMERCHANT) &&
(tmp->GetClass() != DISCORD_MERCHANT) && (tmp->GetClass() != NORRATHS_KEEPERS_MERCHANT) && (tmp->GetClass() != DARK_REIGN_MERCHANT)))
return;
//you have to be somewhat close to them to be properly using them
if (DistanceSquared(m_Position, tmp->GetPosition()) > USE_NPC_RANGE2)
return;
merchantid = tmp->CastToNPC()->MerchantType;
const EQEmu::ItemData *item = nullptr;
std::list<MerchantList> merlist = zone->merchanttable[merchantid];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end() && count<255; ++itr) {
const MerchantList &ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tmp->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (item)
{
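// Collapse the LDoNTheme bitmask into the single theme index the client expects:
// 0 = none/any, 1 = guk, 2 = mir, 3 = mmc, 4 = ruj, 5 = tak.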
uint32 theme;
if (item->LDoNTheme > 16)
{
theme = 0;
}
else if (item->LDoNTheme & 16)
{
theme = 5;
}
else if (item->LDoNTheme & 8)
{
theme = 4;
}
else if (item->LDoNTheme & 4)
{
theme = 3;
}
else if (item->LDoNTheme & 2)
{
theme = 2;
}
else if (item->LDoNTheme & 1)
{
theme = 1;
}
else
{
theme = 0;
}
ss << "^" << item->Name << "|";
ss << item->ID << "|";
ss << item->LDoNPrice << "|";
ss << theme << "|";
ss << (item->Stackable ? 1 : 0) << "|";
ss << (item->LoreFlag ? 1 : 0) << "|";
ss << item->Races << "|";
ss << item->Classes;
count++;
}
}
//Count
//^Item Name,Item ID,Cost in Points,Theme (0=none),0,1,races bit map,classes bitmap
EQApplicationPacket* outapp = new EQApplicationPacket(OP_AdventureMerchantResponse, ss.str().size() + 2);
outapp->pBuffer[0] = count;
strn0cpy((char*)&outapp->pBuffer[1], ss.str().c_str(), ss.str().size());
FastQueuePacket(&outapp);
}
void Client::Handle_OP_AdventureMerchantSell(const EQApplicationPacket *app)
{
if (app->size != sizeof(Adventure_Sell_Struct))
{
LogDebug("Size mismatch on OP_AdventureMerchantSell: got [{}] expected [{}]", app->size, sizeof(Adventure_Sell_Struct));
DumpPacket(app);
return;
}
Adventure_Sell_Struct *ams_in = (Adventure_Sell_Struct*)app->pBuffer;
Mob* vendor = entity_list.GetMob(ams_in->npcid);
if (vendor == 0 || !vendor->IsNPC() || ((vendor->GetClass() != ADVENTUREMERCHANT) &&
(vendor->GetClass() != NORRATHS_KEEPERS_MERCHANT) && (vendor->GetClass() != DARK_REIGN_MERCHANT)))
{
Message(Chat::Red, "Vendor was not found.");
return;
}
if (DistanceSquared(m_Position, vendor->GetPosition()) > USE_NPC_RANGE2)
{
Message(Chat::Red, "Vendor is out of range.");
return;
}
uint32 itemid = GetItemIDAt(ams_in->slot);
if (itemid == 0)
{
Message(Chat::Red, "Found no item at that slot.");
return;
}
const EQEmu::ItemData* item = database.GetItem(itemid);
EQEmu::ItemInstance* inst = GetInv().GetItem(ams_in->slot);
if (!item || !inst) {
Message(Chat::Red, "You seemed to have misplaced that item...");
return;
}
// Note that Lucy has ldonsold values of 4 and 5 for items sold by Norrath's Keepers and Dark Reign, whereas 13th Floor
// has ldonsold = 0 for these items, so some manual editing of the items DB will be required to support sell back of the
// items.
//
// The Merchant seems to have some other way of knowing whether he will accept the item, other than the ldonsold field,
// e.g. if you summon items 76036 and 76053 (good and evil versions of Spell: Ward Of Vengeance), if you are interacting
// with a Norrath's Keeper merchant and click on 76036 in your inventory, he says he will give you radiant crystals for
// it, but he will refuse for item 76053.
//
// Similarly, just giving a cloth cap an ldonsold value of 4 will not make the Merchant buy it.
//
// Note that the Client will not allow you to sell anything back to a Discord merchant, so there is no need to handle
// that case here.
if (item->LDoNSold == 0)
{
Message(Chat::Red, "The merchant does not want that item.");
return;
}
if (item->LDoNPrice == 0)
{
Message(Chat::Red, "The merchant does not want that item.");
return;
}
// 06/11/2016 This formula matches RoF2 client side calculation.
int32 price = (item->LDoNPrice + 1) * item->LDoNSellBackRate / 100;
if (price == 0)
{
Message(Chat::Red, "The merchant does not want that item.");
return;
}
if (RuleB(EventLog, RecordSellToMerchant))
LogMerchant(this, vendor, ams_in->charges, price, item, false);
if (!inst->IsStackable())
{
DeleteItemInInventory(ams_in->slot, 0, false);
}
else
{
if (inst->GetCharges() < ams_in->charges)
{
ams_in->charges = inst->GetCharges();
}
if (ams_in->charges == 0)
{
Message(Chat::Red, "Charge mismatch error.");
return;
}
DeleteItemInInventory(ams_in->slot, ams_in->charges, false);
price *= ams_in->charges;
}
auto outapp = new EQApplicationPacket(OP_AdventureMerchantSell, sizeof(Adventure_Sell_Struct));
Adventure_Sell_Struct *ams = (Adventure_Sell_Struct*)outapp->pBuffer;
ams->slot = ams_in->slot;
ams->unknown000 = 1;
ams->npcid = ams_in->npcid;
ams->charges = ams_in->charges;
ams->sell_price = price;
FastQueuePacket(&outapp);
switch (vendor->GetClass())
{
case ADVENTUREMERCHANT:
{
UpdateLDoNPoints(price, 6);
break;
}
case NORRATHS_KEEPERS_MERCHANT:
{
SetRadiantCrystals(GetRadiantCrystals() + price);
break;
}
case DARK_REIGN_MERCHANT:
{
SetEbonCrystals(GetEbonCrystals() + price);
break;
}
default:
break;
}
Save(1);
}
void Client::Handle_OP_AdventureRequest(const EQApplicationPacket *app)
{
if (app->size < sizeof(AdventureRequest_Struct))
{
LogError("Handle_OP_AdventureRequest had a packet that was too small");
return;
}
if (IsOnAdventure())
{
return;
}
if (!p_timers.Expired(&database, pTimerStartAdventureTimer, false))
{
return;
}
if (GetPendingAdventureRequest())
{
return;
}
AdventureRequest_Struct* ars = (AdventureRequest_Struct*)app->pBuffer;
uint8 group_members = 0;
Raid *r = nullptr;
Group *g = nullptr;
if (IsRaidGrouped())
{
r = GetRaid();
group_members = r->RaidCount();
}
else if (IsGrouped())
{
g = GetGroup();
group_members = g->GroupCount();
}
else
{
return;
}
if (group_members < RuleI(Adventure, MinNumberForGroup) || group_members > RuleI(Adventure, MaxNumberForGroup))
{
return;
}
Mob* m = entity_list.GetMob(ars->entity_id);
uint32 template_id = 0;
if (m && m->IsNPC())
{
template_id = m->CastToNPC()->GetAdventureTemplate();
}
else
{
return;
}
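// Build the world server request: a fixed header followed by one 64-byte
// name slot per group or raid member, filled in below.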
auto packet =
new ServerPacket(ServerOP_AdventureRequest, sizeof(ServerAdventureRequest_Struct) + (64 * group_members));
ServerAdventureRequest_Struct *sar = (ServerAdventureRequest_Struct*)packet->pBuffer;
sar->member_count = group_members;
sar->risk = ars->risk;
sar->type = ars->type;
sar->template_id = template_id;
strcpy(sar->leader, GetName());
if (IsRaidGrouped())
{
int i = 0;
for (int x = 0; x < 72; ++x)
{
if (i == group_members)
{
break;
}
const char *c_name = nullptr;
c_name = r->GetClientNameByIndex(x);
if (c_name)
{
memcpy((packet->pBuffer + sizeof(ServerAdventureRequest_Struct) + (64 * i)), c_name, strlen(c_name));
++i;
}
}
}
else
{
int i = 0;
for (int x = 0; x < 6; ++x)
{
if (i == group_members)
{
break;
}
const char *c_name = nullptr;
c_name = g->GetClientNameByIndex(x);
if (c_name)
{
memcpy((packet->pBuffer + sizeof(ServerAdventureRequest_Struct) + (64 * i)), c_name, strlen(c_name));
++i;
}
}
}
worldserver.SendPacket(packet);
delete packet;
p_timers.Start(pTimerStartAdventureTimer, 5);
}
void Client::Handle_OP_AdventureStatsRequest(const EQApplicationPacket *app)
{
if (adventure_stats_timer)
{
return;
}
adventure_stats_timer = new Timer(8000);
auto outapp = new EQApplicationPacket(OP_AdventureStatsReply, sizeof(AdventureStats_Struct));
AdventureStats_Struct *as = (AdventureStats_Struct*)outapp->pBuffer;
if (database.GetAdventureStats(CharacterID(), as))
{
m_pp.ldon_wins_guk = as->success.guk;
m_pp.ldon_wins_mir = as->success.mir;
m_pp.ldon_wins_mmc = as->success.mmc;
m_pp.ldon_wins_ruj = as->success.ruj;
m_pp.ldon_wins_tak = as->success.tak;
m_pp.ldon_losses_guk = as->failure.guk;
m_pp.ldon_losses_mir = as->failure.mir;
m_pp.ldon_losses_mmc = as->failure.mmc;
m_pp.ldon_losses_ruj = as->failure.ruj;
m_pp.ldon_losses_tak = as->failure.tak;
}
FastQueuePacket(&outapp);
}
void Client::Handle_OP_AggroMeterLockTarget(const EQApplicationPacket *app)
{
if (app->size < sizeof(uint32)) {
LogError("Handle_OP_AggroMeterLockTarget had a packet that was too small");
return;
}
SetAggroMeterLock(app->ReadUInt32(0));
ProcessAggroMeter();
}
void Client::Handle_OP_AltCurrencyMerchantRequest(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_AltCurrencyMerchantRequest, app, uint32);
NPC* tar = entity_list.GetNPCByID(*((uint32*)app->pBuffer));
if (tar) {
if (DistanceSquared(m_Position, tar->GetPosition()) > USE_NPC_RANGE2)
return;
if (tar->GetClass() != ALT_CURRENCY_MERCHANT) {
return;
}
uint32 alt_cur_id = tar->GetAltCurrencyType();
if (alt_cur_id == 0) {
return;
}
auto altc_iter = zone->AlternateCurrencies.begin();
bool found = false;
while (altc_iter != zone->AlternateCurrencies.end()) {
if ((*altc_iter).id == alt_cur_id) {
found = true;
break;
}
++altc_iter;
}
if (!found) {
return;
}
std::stringstream ss(std::stringstream::in | std::stringstream::out);
std::stringstream item_ss(std::stringstream::in | std::stringstream::out);
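// Reply format is a pipe-delimited string: "<currency_id>|1|<currency_id>|<count>"
// followed by one "^Name|ID|cost|0|1|races|classes" entry per sellable item.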
ss << alt_cur_id << "|1|" << alt_cur_id;
uint32 count = 0;
uint32 merchant_id = tar->MerchantType;
const EQEmu::ItemData *item = nullptr;
std::list<MerchantList> merlist = zone->merchanttable[merchant_id];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end() && count < 255; ++itr) {
const MerchantList &ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tar->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (item)
{
item_ss << "^" << item->Name << "|";
item_ss << item->ID << "|";
item_ss << ml.alt_currency_cost << "|";
item_ss << "0|";
item_ss << "1|";
item_ss << item->Races << "|";
item_ss << item->Classes;
count++;
}
}
if (count > 0) {
ss << "|" << count << item_ss.str();
}
else {
ss << "|0";
}
EQApplicationPacket* outapp = new EQApplicationPacket(OP_AltCurrencyMerchantReply, ss.str().length() + 1);
memcpy(outapp->pBuffer, ss.str().c_str(), ss.str().length());
FastQueuePacket(&outapp);
}
}
void Client::Handle_OP_AltCurrencyPurchase(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_AltCurrencyPurchase, app, AltCurrencyPurchaseItem_Struct);
AltCurrencyPurchaseItem_Struct *purchase = (AltCurrencyPurchaseItem_Struct*)app->pBuffer;
NPC* tar = entity_list.GetNPCByID(purchase->merchant_entity_id);
if (tar) {
if (DistanceSquared(m_Position, tar->GetPosition())> USE_NPC_RANGE2)
return;
if (tar->GetClass() != ALT_CURRENCY_MERCHANT) {
return;
}
uint32 alt_cur_id = tar->GetAltCurrencyType();
if (alt_cur_id == 0) {
return;
}
const EQEmu::ItemData* item = nullptr;
uint32 cost = 0;
uint32 current_currency = GetAlternateCurrencyValue(alt_cur_id);
uint32 merchant_id = tar->MerchantType;
bool found = false;
std::list<MerchantList> merlist = zone->merchanttable[merchant_id];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tar->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (!item)
continue;
if (item->ID == purchase->item_id) { // This check makes sure the item is actually on the NPC; people attempt to inject packets to get items summoned...
cost = ml.alt_currency_cost;
found = true;
break;
}
}
if (!item || !found) {
Message(Chat::Red, "Error: The item you purchased does not exist!");
return;
}
if (cost > current_currency) {
Message(Chat::Red, "You cannot afford that item right now.");
return;
}
if (CheckLoreConflict(item))
{
Message(Chat::Yellow, "You can only have one of a lore item.");
return;
}
/* QS: PlayerLogAlternateCurrencyTransactions :: Merchant Purchase */
if (RuleB(QueryServ, PlayerLogAlternateCurrencyTransactions)) {
std::string event_desc = StringFormat("Merchant Purchase :: Spent alt_currency_id:%i cost:%i for itemid:%i in zoneid:%i instid:%i", alt_cur_id, cost, item->ID, this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Alternate_Currency_Transactions, this->CharacterID(), event_desc);
}
AddAlternateCurrencyValue(alt_cur_id, -((int32)cost));
int16 charges = 1;
if (item->MaxCharges != 0)
charges = item->MaxCharges;
EQEmu::ItemInstance *inst = database.CreateItem(item, charges);
if (!AutoPutLootInInventory(*inst, true, true))
{
PutLootInInventory(EQEmu::invslot::slotCursor, *inst);
}
Save(1);
}
}
void Client::Handle_OP_AltCurrencyReclaim(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_AltCurrencyReclaim, app, AltCurrencyReclaim_Struct);
AltCurrencyReclaim_Struct *reclaim = (AltCurrencyReclaim_Struct*)app->pBuffer;
uint32 item_id = 0;
auto iter = zone->AlternateCurrencies.begin();
while (iter != zone->AlternateCurrencies.end()) {
if ((*iter).id == reclaim->currency_id) {
item_id = (*iter).item_id;
}
++iter;
}
if (item_id == 0) {
return;
}
/* Item to Currency Storage */
if (reclaim->reclaim_flag == 1) {
uint32 removed = NukeItem(item_id, invWhereWorn | invWherePersonal | invWhereCursor);
if (removed > 0) {
AddAlternateCurrencyValue(reclaim->currency_id, removed);
/* QS: PlayerLogAlternateCurrencyTransactions :: Item to Currency */
if (RuleB(QueryServ, PlayerLogAlternateCurrencyTransactions)) {
std::string event_desc = StringFormat("Reclaim :: Item to Currency :: alt_currency_id:%i amount:%i to currency tab in zoneid:%i instid:%i", reclaim->currency_id, removed, this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Alternate_Currency_Transactions, this->CharacterID(), event_desc);
}
}
}
/* Cursor to Item storage */
else {
uint32 max_currency = GetAlternateCurrencyValue(reclaim->currency_id);
if (max_currency == 0 || reclaim->count == 0)
return;
/* If you input more than you have currency wise, just give the max of the currency you currently have */
if (reclaim->count > max_currency) {
SummonItem(item_id, max_currency);
SetAlternateCurrencyValue(reclaim->currency_id, 0);
}
else {
SummonItem(item_id, reclaim->count, 0, 0, 0, 0, 0, 0, false, EQEmu::invslot::slotCursor);
AddAlternateCurrencyValue(reclaim->currency_id, -((int32)reclaim->count));
}
/* QS: PlayerLogAlternateCurrencyTransactions :: Cursor to Item Storage */
if (RuleB(QueryServ, PlayerLogAlternateCurrencyTransactions)) {
std::string event_desc = StringFormat("Reclaim :: Cursor to Item :: alt_currency_id:%i amount:-%i in zoneid:%i instid:%i", reclaim->currency_id, reclaim->count, this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Alternate_Currency_Transactions, this->CharacterID(), event_desc);
}
}
}
void Client::Handle_OP_AltCurrencySell(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_AltCurrencySell, app, AltCurrencySellItem_Struct);
EQApplicationPacket *outapp = app->Copy();
AltCurrencySellItem_Struct *sell = (AltCurrencySellItem_Struct*)outapp->pBuffer;
NPC* tar = entity_list.GetNPCByID(sell->merchant_entity_id);
if (tar) {
if (DistanceSquared(m_Position, tar->GetPosition()) > USE_NPC_RANGE2)
return;
if (tar->GetClass() != ALT_CURRENCY_MERCHANT) {
return;
}
uint32 alt_cur_id = tar->GetAltCurrencyType();
if (alt_cur_id == 0) {
return;
}
EQEmu::ItemInstance* inst = GetInv().GetItem(sell->slot_id);
if (!inst) {
return;
}
if (!RuleB(Merchant, EnableAltCurrencySell)) {
return;
}
const EQEmu::ItemData* item = nullptr;
uint32 cost = 0;
uint32 current_currency = GetAlternateCurrencyValue(alt_cur_id);
uint32 merchant_id = tar->MerchantType;
uint32 npc_id = tar->GetNPCTypeID();
bool found = false;
std::list<MerchantList> merlist = zone->merchanttable[merchant_id];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tar->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (!item)
continue;
if (item->ID == inst->GetItem()->ID) {
cost = ml.alt_currency_cost;
found = true;
break;
}
}
if (!found) {
return;
}
if (!inst->IsStackable())
{
DeleteItemInInventory(sell->slot_id, 0, false);
}
else
{
if (inst->GetCharges() < sell->charges)
{
sell->charges = inst->GetCharges();
}
if (sell->charges == 0)
{
Message(Chat::Red, "Charge mismatch error.");
return;
}
DeleteItemInInventory(sell->slot_id, sell->charges, false);
cost *= sell->charges;
}
sell->cost = cost;
/* QS: PlayerLogAlternateCurrencyTransactions :: Sold to Merchant*/
if (RuleB(QueryServ, PlayerLogAlternateCurrencyTransactions)) {
std::string event_desc = StringFormat("Sold to Merchant :: itemid:%u npcid:%u alt_currency_id:%u cost:%u in zoneid:%u instid:%i", item->ID, npc_id, alt_cur_id, cost, this->GetZoneID(), this->GetInstanceID());
QServ->PlayerLogEvent(Player_Log_Alternate_Currency_Transactions, this->CharacterID(), event_desc);
}
FastQueuePacket(&outapp);
AddAlternateCurrencyValue(alt_cur_id, cost);
Save(1);
}
}
void Client::Handle_OP_AltCurrencySellSelection(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_AltCurrencySellSelection, app, AltCurrencySelectItem_Struct);
AltCurrencySelectItem_Struct *select = (AltCurrencySelectItem_Struct*)app->pBuffer;
NPC* tar = entity_list.GetNPCByID(select->merchant_entity_id);
if (tar) {
if (DistanceSquared(m_Position, tar->GetPosition()) > USE_NPC_RANGE2)
return;
if (tar->GetClass() != ALT_CURRENCY_MERCHANT) {
return;
}
uint32 alt_cur_id = tar->GetAltCurrencyType();
if (alt_cur_id == 0) {
return;
}
EQEmu::ItemInstance *inst = m_inv.GetItem(select->slot_id);
if (!inst) {
return;
}
const EQEmu::ItemData* item = nullptr;
uint32 cost = 0;
uint32 current_currency = GetAlternateCurrencyValue(alt_cur_id);
uint32 merchant_id = tar->MerchantType;
if (RuleB(Merchant, EnableAltCurrencySell)) {
bool found = false;
std::list<MerchantList> merlist = zone->merchanttable[merchant_id];
std::list<MerchantList>::const_iterator itr;
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
int32 fac = tar->GetPrimaryFaction();
if (fac != 0 && GetModCharacterFactionLevel(fac) < ml.faction_required) {
continue;
}
item = database.GetItem(ml.item);
if (!item)
continue;
if (item->ID == inst->GetItem()->ID) {
cost = ml.alt_currency_cost;
found = true;
break;
}
}
if (!found) {
cost = 0;
}
}
else {
cost = 0;
}
auto outapp =
new EQApplicationPacket(OP_AltCurrencySellSelection, sizeof(AltCurrencySelectItemReply_Struct));
AltCurrencySelectItemReply_Struct *reply = (AltCurrencySelectItemReply_Struct*)outapp->pBuffer;
reply->unknown004 = 0xFF;
reply->unknown005 = 0xFF;
reply->unknown006 = 0xFF;
reply->unknown007 = 0xFF;
strcpy(reply->item_name, inst->GetItem()->Name);
reply->cost = cost;
FastQueuePacket(&outapp);
}
}
void Client::Handle_OP_Animation(const EQApplicationPacket *app)
{
if (app->size != sizeof(Animation_Struct)) {
LogError("Received invalid sized OP_Animation: got [{}], expected [{}]", app->size, sizeof(Animation_Struct));
DumpPacket(app);
return;
}
Animation_Struct *s = (Animation_Struct *)app->pBuffer;
// might verify spawn ID, but it wouldn't affect anything
DoAnim(s->action, s->speed);
return;
}
void Client::Handle_OP_ApplyPoison(const EQApplicationPacket *app)
{
if (app->size != sizeof(ApplyPoison_Struct)) {
LogError("Wrong size: OP_ApplyPoison, size=[{}], expected [{}]", app->size, sizeof(ApplyPoison_Struct));
DumpPacket(app);
return;
}
ApplyPoison_Struct* ApplyPoisonData = (ApplyPoison_Struct*)app->pBuffer;
uint32 ApplyPoisonSuccessResult = 0;
const EQEmu::ItemInstance* PoisonItemInstance = GetInv().GetItem(ApplyPoisonData->inventorySlot);
const EQEmu::ItemData* poison = (PoisonItemInstance ? PoisonItemInstance->GetItem() : nullptr);
bool IsPoison = (poison && poison->ItemType == EQEmu::item::ItemTypePoison);
if (IsPoison && GetClass() == ROGUE) {
// Live always checks for skillup, even when poison is too high
CheckIncreaseSkill(EQEmu::skills::SkillApplyPoison, nullptr, 10);
if (poison->Proc.Level2 > GetLevel()) {
// Poison is too high to apply.
MessageString(Chat::LightBlue, POISON_TOO_HIGH);
}
else {
double ChanceRoll = zone->random.Real(0, 1);
// Poisons that use this skill (old world poisons) almost
// never fail to apply. I did 25 applies of a trivial 120+
// poison with an apply skill of 48 and they all worked.
// Also did 25 straight poisons at apply skill 248 for very
// high end and they never failed.
// Apply poison ranging from 1-9, 28/30 worked for a level 18..
// Poisons that don't proc until a level higher than the
// rogue simply won't apply at all, no skill check done.
uint16 poison_skill = GetSkill(EQEmu::skills::SkillApplyPoison);
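// Success chance scales from a 75% base toward ~100% at high skill
// (poison_skill / 1000.0 contributes up to roughly +0.25).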
if (ChanceRoll < (.75 + poison_skill / 1000.0)) {
ApplyPoisonSuccessResult = 1;
AddProcToWeapon(poison->Proc.Effect, false, (GetDEX() / 100) + 103, POISON_PROC);
}
}
// Live always deletes the item, success or failure. Even if too high.
DeleteItemInInventory(ApplyPoisonData->inventorySlot, 1, true);
}
auto outapp = new EQApplicationPacket(OP_ApplyPoison, nullptr, sizeof(ApplyPoison_Struct));
ApplyPoison_Struct* ApplyPoisonResult = (ApplyPoison_Struct*)outapp->pBuffer;
ApplyPoisonResult->success = ApplyPoisonSuccessResult;
ApplyPoisonResult->inventorySlot = ApplyPoisonData->inventorySlot;
FastQueuePacket(&outapp);
}
void Client::Handle_OP_Assist(const EQApplicationPacket *app)
{
if (app->size != sizeof(EntityId_Struct)) {
LogDebug("Size mismatch in OP_Assist expected [{}] got [{}]", sizeof(EntityId_Struct), app->size);
return;
}
EntityId_Struct* eid = (EntityId_Struct*)app->pBuffer;
Entity* entity = entity_list.GetID(eid->entity_id);
EQApplicationPacket* outapp = app->Copy();
eid = (EntityId_Struct*)outapp->pBuffer;
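// Echo the assist packet back with the assistee's current target, provided the
// assistee is within targeting range (GMs bypass the range check).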
if (RuleB(Combat, AssistNoTargetSelf))
eid->entity_id = GetID();
if (entity && entity->IsMob()) {
Mob *assistee = entity->CastToMob();
if (assistee->GetTarget()) {
Mob *new_target = assistee->GetTarget();
if (new_target && (GetGM() ||
Distance(m_Position, assistee->GetPosition()) <= TARGETING_RANGE)) {
eid->entity_id = new_target->GetID();
} else {
eid->entity_id = 0;
}
} else {
eid->entity_id = 0;
}
}
FastQueuePacket(&outapp);
return;
}
void Client::Handle_OP_AssistGroup(const EQApplicationPacket *app)
{
if (app->size != sizeof(EntityId_Struct)) {
LogDebug("Size mismatch in OP_AssistGroup expected [{}] got [{}]", sizeof(EntityId_Struct), app->size);
return;
}
QueuePacket(app);
return;
}
void Client::Handle_OP_AugmentInfo(const EQApplicationPacket *app)
{
// This packet is sent by the client when an Augment item information window is opened.
// Some clients this seems to nuke the charm text (ex. Adventurer's Stone)
if (app->size != sizeof(AugmentInfo_Struct)) {
LogDebug("Size mismatch in OP_AugmentInfo expected [{}] got [{}]", sizeof(AugmentInfo_Struct), app->size);
DumpPacket(app);
return;
}
AugmentInfo_Struct* AugInfo = (AugmentInfo_Struct*)app->pBuffer;
const EQEmu::ItemData * item = database.GetItem(AugInfo->itemid);
if (item) {
strn0cpy(AugInfo->augment_info, item->Name, 64);
AugInfo->itemid = 0;
QueuePacket(app);
}
}
void Client::Handle_OP_AugmentItem(const EQApplicationPacket *app)
{
if (app->size != sizeof(AugmentItem_Struct)) {
LogError("Invalid size for AugmentItem_Struct: Expected: [{}], Got: [{}]",
sizeof(AugmentItem_Struct), app->size);
return;
}
AugmentItem_Struct* in_augment = (AugmentItem_Struct*)app->pBuffer;
bool deleteItems = false;
if (ClientVersion() >= EQEmu::versions::ClientVersion::RoF)
{
if ((in_augment->container_slot < EQEmu::invslot::EQUIPMENT_BEGIN || in_augment->container_slot > EQEmu::invslot::GENERAL_END) &&
(in_augment->container_slot < EQEmu::invbag::GENERAL_BAGS_BEGIN || in_augment->container_slot > EQEmu::invbag::GENERAL_BAGS_END))
{
Message(Chat::Red, "The server does not allow augmentation actions from this slot.");
auto cursor_item = m_inv[EQEmu::invslot::slotCursor];
auto augmented_item = m_inv[in_augment->container_slot];
SendItemPacket(EQEmu::invslot::slotCursor, cursor_item, ItemPacketCharInventory);
// this may crash clients on certain slots
SendItemPacket(in_augment->container_slot, augmented_item, ItemPacketCharInventory);
return;
}
EQEmu::ItemInstance *itemOneToPush = nullptr, *itemTwoToPush = nullptr;
//Log(Logs::DebugLevel::Moderate, Logs::Debug, "cslot: [{}] aslot: [{}] cidx: [{}] aidx: [{}] act: [{}] dest: [{}]",
// in_augment->container_slot, in_augment->augment_slot, in_augment->container_index, in_augment->augment_index, in_augment->augment_action, in_augment->dest_inst_id);
EQEmu::ItemInstance *tobe_auged = nullptr, *old_aug = nullptr, *new_aug = nullptr, *aug = nullptr, *solvent = nullptr;
EQEmu::InventoryProfile& user_inv = GetInv();
uint16 item_slot = in_augment->container_slot;
uint16 solvent_slot = in_augment->augment_slot;
uint8 mat = EQEmu::InventoryProfile::CalcMaterialFromSlot(item_slot); // for when player is augging a piece of equipment while they're wearing it
if (item_slot == INVALID_INDEX || solvent_slot == INVALID_INDEX)
{
Message(Chat::Red, "Error: Invalid Aug Index.");
return;
}
tobe_auged = user_inv.GetItem(item_slot);
solvent = user_inv.GetItem(solvent_slot);
if (!tobe_auged)
{
Message(Chat::Red, "Error: Invalid item passed for augmenting.");
return;
}
if ((in_augment->augment_action == 1) || (in_augment->augment_action == 2))
{
// Check for valid distiller if safely removing / swapping an augmentation
if (!solvent)
{
old_aug = tobe_auged->GetAugment(in_augment->augment_index);
if (!old_aug || old_aug->GetItem()->AugDistiller != 0) {
LogError("Player tried to safely remove an augment without a distiller");
Message(Chat::Red, "Error: Missing an augmentation distiller for safely removing this augment.");
return;
}
}
else if (solvent->GetItem()->ItemType == EQEmu::item::ItemTypeAugmentationDistiller)
{
old_aug = tobe_auged->GetAugment(in_augment->augment_index);
if (!old_aug)
{
LogError("Player tried to safely remove a nonexistent augment");
Message(Chat::Red, "Error: No augment found in slot %i for safely removing.", in_augment->augment_index);
return;
}
else if (solvent->GetItem()->ID != old_aug->GetItem()->AugDistiller)
{
LogError("Player tried to safely remove an augment with the wrong distiller (item [{}] vs expected [{}])", solvent->GetItem()->ID, old_aug->GetItem()->AugDistiller);
Message(Chat::Red, "Error: Wrong augmentation distiller for safely removing this augment.");
return;
}
}
else if (solvent->GetItem()->ItemType != EQEmu::item::ItemTypePerfectedAugmentationDistiller)
{
LogError("Player tried to safely remove an augment with a non-distiller item");
Message(Chat::Red, "Error: Invalid augmentation distiller for safely removing this augment.");
return;
}
}
switch (in_augment->augment_action)
{
case 0: // Adding an augment
case 2: // Swapping augment
new_aug = user_inv.GetItem(EQEmu::invslot::slotCursor);
if (!new_aug) // Shouldn't get the OP code without the augment on the user's cursor, but maybe it's h4x.
{
LogError("AugmentItem OpCode with 'Insert' or 'Swap' action received, but no augment on client's cursor");
Message(Chat::Red, "Error: No augment found on cursor for inserting.");
return;
}
else
{
if (((tobe_auged->IsAugmentSlotAvailable(new_aug->GetAugmentType(), in_augment->augment_index)) != -1) &&
(tobe_auged->AvailableWearSlot(new_aug->GetItem()->Slots)))
{
old_aug = tobe_auged->RemoveAugment(in_augment->augment_index);
if (old_aug)
{
// An old augment was removed in order to be replaced with the new one (augment_action 2)
CalcBonuses();
std::vector<EQEmu::Any> args;
args.push_back(old_aug);
parse->EventItem(EVENT_UNAUGMENT_ITEM, this, tobe_auged, nullptr, "", in_augment->augment_index, &args);
args.assign(1, tobe_auged);
args.push_back(false);
parse->EventItem(EVENT_AUGMENT_REMOVE, this, old_aug, nullptr, "", in_augment->augment_index, &args);
}
tobe_auged->PutAugment(in_augment->augment_index, *new_aug);
tobe_auged->UpdateOrnamentationInfo();
aug = tobe_auged->GetAugment(in_augment->augment_index);
if (aug)
{
std::vector<EQEmu::Any> args;
args.push_back(aug);
parse->EventItem(EVENT_AUGMENT_ITEM, this, tobe_auged, nullptr, "", in_augment->augment_index, &args);
args.assign(1, tobe_auged);
parse->EventItem(EVENT_AUGMENT_INSERT, this, aug, nullptr, "", in_augment->augment_index, &args);
}
else
{
Message(Chat::Red, "Error: Could not properly insert augmentation into augment slot %i. Aborting.", in_augment->augment_index);
return;
}
itemOneToPush = tobe_auged->Clone();
if (old_aug)
{
itemTwoToPush = old_aug->Clone();
}
// Must push items after the items in inventory are deleted - necessary due to lore items...
if (itemOneToPush)
{
DeleteItemInInventory(item_slot, 0, true);
DeleteItemInInventory(EQEmu::invslot::slotCursor, new_aug->IsStackable() ? 1 : 0, true);
if (solvent)
{
// Consume the augment distiller
DeleteItemInInventory(solvent_slot, solvent->IsStackable() ? 1 : 0, true);
}
if (itemTwoToPush)
{
// This is a swap. Return the old aug to the player's cursor.
if (!PutItemInInventory(EQEmu::invslot::slotCursor, *itemTwoToPush, true))
{
LogError("Problem returning old augment to player's cursor after augmentation swap");
Message(Chat::Yellow, "Error: Failed to retrieve old augment after augmentation swap!");
}
}
if (PutItemInInventory(item_slot, *itemOneToPush, true))
{
// Successfully added an augment to the item
CalcBonuses();
if (mat != EQEmu::textures::materialInvalid)
{
SendWearChange(mat); // Visible item augged while equipped. Send WC in case ornamentation changed.
}
}
else
{
Message(Chat::Red, "Error: No available slot for end result. Please free up the augment slot.");
}
}
else
{
Message(Chat::Red, "Error in cloning item for augment. Aborted.");
}
}
else
{
Message(Chat::Red, "Error: No available slot for augment in that item.");
}
}
break;
case 1: // Removing augment safely (distiller)
aug = tobe_auged->GetAugment(in_augment->augment_index);
if (aug)
{
std::vector<EQEmu::Any> args;
args.push_back(aug);
parse->EventItem(EVENT_UNAUGMENT_ITEM, this, tobe_auged, nullptr, "", in_augment->augment_index, &args);
args.assign(1, tobe_auged);
args.push_back(false);
parse->EventItem(EVENT_AUGMENT_REMOVE, this, aug, nullptr, "", in_augment->augment_index, &args);
}
else
{
Message(Chat::Red, "Error: Could not find augmentation to remove at index %i. Aborting.", in_augment->augment_index);
return;
}
old_aug = tobe_auged->RemoveAugment(in_augment->augment_index);
tobe_auged->UpdateOrnamentationInfo();
itemOneToPush = tobe_auged->Clone();
if (old_aug)
itemTwoToPush = old_aug->Clone();
if (itemOneToPush && itemTwoToPush)
{
// Consume the augment distiller
if (solvent)
DeleteItemInInventory(solvent_slot, solvent->IsStackable() ? 1 : 0, true);
// Remove the augmented item
DeleteItemInInventory(item_slot, 0, true);
// Replace it with the unaugmented item
if (!PutItemInInventory(item_slot, *itemOneToPush, true))
{
LogError("Problem returning equipment item to player's inventory after safe augment removal");
Message(Chat::Yellow, "Error: Failed to return item after de-augmentation!");
}
CalcBonuses();
if (mat != EQEmu::textures::materialInvalid)
{
SendWearChange(mat); // Visible item augged while equipped. Send WC in case ornamentation changed.
}
// Drop the removed augment on the player's cursor
if (!PutItemInInventory(EQEmu::invslot::slotCursor, *itemTwoToPush, true))
{
LogError("Problem returning augment to player's cursor after safe removal");
Message(Chat::Yellow, "Error: Failed to return augment after removal from item!");
return;
}
}
break;
case 3: // Destroying augment (formerly done in birdbath/sealer with a solvent)
// RoF client does not require an augmentation solvent for destroying an augmentation in an item.
// Augments can be destroyed with a right click -> Destroy at any time.
aug = tobe_auged->GetAugment(in_augment->augment_index);
if (aug)
{
std::vector<EQEmu::Any> args;
args.push_back(aug);
parse->EventItem(EVENT_UNAUGMENT_ITEM, this, tobe_auged, nullptr, "", in_augment->augment_index, &args);
args.assign(1, tobe_auged);
args.push_back(true);
parse->EventItem(EVENT_AUGMENT_REMOVE, this, aug, nullptr, "", in_augment->augment_index, &args);
}
else
{
Message(Chat::Red, "Error: Could not find augmentation to remove at index %i. Aborting.");
return;
}
tobe_auged->DeleteAugment(in_augment->augment_index);
tobe_auged->UpdateOrnamentationInfo();
itemOneToPush = tobe_auged->Clone();
if (itemOneToPush)
{
DeleteItemInInventory(item_slot, 0, true);
if (!PutItemInInventory(item_slot, *itemOneToPush, true))
{
LogError("Problem returning equipment item to player's inventory after augment deletion");
Message(Chat::Yellow, "Error: Failed to return item after destroying augment!");
}
}
CalcBonuses();
if (mat != EQEmu::textures::materialInvalid)
{
SendWearChange(mat);
}
break;
default: // Unknown
LogInventory("Unrecognized augmentation action - cslot: [{}] aslot: [{}] cidx: [{}] aidx: [{}] act: [{}] dest: [{}]",
in_augment->container_slot, in_augment->augment_slot, in_augment->container_index, in_augment->augment_index, in_augment->augment_action, in_augment->dest_inst_id);
break;
}
}
else
{
// Delegate to tradeskill object to perform combine
Object::HandleAugmentation(this, in_augment, m_tradeskill_object);
}
return;
}
void Client::Handle_OP_AutoAttack(const EQApplicationPacket *app)
{
if (app->size != 4) {
LogError("OP size error: OP_AutoAttack expected:4 got:[{}]", app->size);
return;
}
if (app->pBuffer[0] == 0) {
auto_attack = false;
if (IsAIControlled()) {
return;
}
attack_timer.Disable();
ranged_timer.Disable();
attack_dw_timer.Disable();
m_AutoAttackPosition = glm::vec4();
m_AutoAttackTargetLocation = glm::vec3();
aa_los_them_mob = nullptr;
}
else if (app->pBuffer[0] == 1) {
auto_attack = true;
auto_fire = false;
if (IsAIControlled()) {
return;
}
SetAttackTimer();
if (GetTarget()) {
aa_los_them_mob = GetTarget();
m_AutoAttackPosition = GetPosition();
m_AutoAttackTargetLocation = glm::vec3(aa_los_them_mob->GetPosition());
los_status = CheckLosFN(aa_los_them_mob);
los_status_facing = IsFacingMob(aa_los_them_mob);
}
else {
m_AutoAttackPosition = GetPosition();
m_AutoAttackTargetLocation = glm::vec3();
aa_los_them_mob = nullptr;
los_status = false;
los_status_facing = false;
}
}
}
void Client::Handle_OP_AutoAttack2(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_AutoFire(const EQApplicationPacket *app)
{
if (app->size != sizeof(bool)) {
LogDebug("Size mismatch in OP_AutoFire expected [{}] got [{}]", sizeof(bool), app->size);
DumpPacket(app);
return;
}
bool *af = (bool*)app->pBuffer;
auto_fire = *af;
auto_attack = false;
SetAttackTimer();
}
void Client::Handle_OP_Bandolier(const EQApplicationPacket *app)
{
// Although there are three different structs for OP_Bandolier, they are all the same size.
//
if (app->size != sizeof(BandolierCreate_Struct)) {
LogDebug("Size mismatch in OP_Bandolier expected [{}] got [{}]", sizeof(BandolierCreate_Struct), app->size);
DumpPacket(app);
return;
}
BandolierCreate_Struct *bs = (BandolierCreate_Struct*)app->pBuffer;
switch (bs->Action)
{
case bandolierCreate:
CreateBandolier(app);
break;
case bandolierRemove:
RemoveBandolier(app);
break;
case bandolierSet:
SetBandolier(app);
break;
default:
LogDebug("Unknown Bandolier action [{}]", bs->Action);
break;
}
}
void Client::Handle_OP_BankerChange(const EQApplicationPacket *app)
{
if (app->size != sizeof(BankerChange_Struct) && app->size != 4) //Titanium only sends 4 Bytes for this
{
LogDebug("Size mismatch in OP_BankerChange expected [{}] got [{}]", sizeof(BankerChange_Struct), app->size);
DumpPacket(app);
return;
}
uint32 distance = 0;
NPC *banker = entity_list.GetClosestBanker(this, distance);
if (!banker || distance > USE_NPC_RANGE2)
{
char *hacked_string = nullptr;
MakeAnyLenString(&hacked_string, "Player tried to make use of a banker (money) but %s is non-existent or too far away (%u units).",
banker ? banker->GetName() : "UNKNOWN NPC", distance);
database.SetMQDetectionFlag(AccountName(), GetName(), hacked_string, zone->GetShortName());
safe_delete_array(hacked_string);
return;
}
auto outapp = new EQApplicationPacket(OP_BankerChange, nullptr, sizeof(BankerChange_Struct));
BankerChange_Struct *bc = (BankerChange_Struct *)outapp->pBuffer;
if (m_pp.platinum < 0)
m_pp.platinum = 0;
if (m_pp.gold < 0)
m_pp.gold = 0;
if (m_pp.silver < 0)
m_pp.silver = 0;
if (m_pp.copper < 0)
m_pp.copper = 0;
if (m_pp.platinum_bank < 0)
m_pp.platinum_bank = 0;
if (m_pp.gold_bank < 0)
m_pp.gold_bank = 0;
if (m_pp.silver_bank < 0)
m_pp.silver_bank = 0;
if (m_pp.copper_bank < 0)
m_pp.copper_bank = 0;
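// Collapse carried and banked coin into total copper, then redistribute it so each
// denomination below platinum holds at most 9 coins (i.e. make change).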
uint64 cp = static_cast<uint64>(m_pp.copper) +
(static_cast<uint64>(m_pp.silver) * 10) +
(static_cast<uint64>(m_pp.gold) * 100) +
(static_cast<uint64>(m_pp.platinum) * 1000);
m_pp.copper = cp % 10;
cp /= 10;
m_pp.silver = cp % 10;
cp /= 10;
m_pp.gold = cp % 10;
cp /= 10;
m_pp.platinum = cp;
cp = static_cast<uint64>(m_pp.copper_bank) +
(static_cast<uint64>(m_pp.silver_bank) * 10) +
(static_cast<uint64>(m_pp.gold_bank) * 100) +
(static_cast<uint64>(m_pp.platinum_bank) * 1000);
m_pp.copper_bank = cp % 10;
cp /= 10;
m_pp.silver_bank = cp % 10;
cp /= 10;
m_pp.gold_bank = cp % 10;
cp /= 10;
m_pp.platinum_bank = cp;
bc->copper = m_pp.copper;
bc->silver = m_pp.silver;
bc->gold = m_pp.gold;
bc->platinum = m_pp.platinum;
bc->copper_bank = m_pp.copper_bank;
bc->silver_bank = m_pp.silver_bank;
bc->gold_bank = m_pp.gold_bank;
bc->platinum_bank = m_pp.platinum_bank;
FastQueuePacket(&outapp);
return;
}
void Client::Handle_OP_Barter(const EQApplicationPacket *app)
{
if (app->size < 4)
{
LogDebug("OP_Barter packet below minimum expected size. The packet was [{}] bytes", app->size);
DumpPacket(app);
return;
}
char* Buf = (char *)app->pBuffer;
// The first 4 bytes of the packet determine the action. A lot of Barter packets require the
// packet the client sent, sent back to it as an acknowledgement.
//
uint32 Action = VARSTRUCT_DECODE_TYPE(uint32, Buf);
switch (Action)
{
case Barter_BuyerSearch:
{
BuyerItemSearch(app);
break;
}
case Barter_SellerSearch:
{
BarterSearchRequest_Struct *bsr = (BarterSearchRequest_Struct*)app->pBuffer;
SendBuyerResults(bsr->SearchString, bsr->SearchID);
break;
}
case Barter_BuyerModeOn:
{
if (!Trader) {
ToggleBuyerMode(true);
}
else {
Buf = (char *)app->pBuffer;
VARSTRUCT_ENCODE_TYPE(uint32, Buf, Barter_BuyerModeOff);
Message(Chat::Red, "You cannot be a Trader and Buyer at the same time.");
}
QueuePacket(app);
break;
}
case Barter_BuyerModeOff:
{
QueuePacket(app);
ToggleBuyerMode(false);
break;
}
case Barter_BuyerItemUpdate:
{
UpdateBuyLine(app);
break;
}
case Barter_BuyerItemRemove:
{
BuyerRemoveItem_Struct* bris = (BuyerRemoveItem_Struct*)app->pBuffer;
database.RemoveBuyLine(CharacterID(), bris->BuySlot);
QueuePacket(app);
break;
}
case Barter_SellItem:
{
SellToBuyer(app);
break;
}
case Barter_BuyerInspectBegin:
{
ShowBuyLines(app);
break;
}
case Barter_BuyerInspectEnd:
{
BuyerInspectRequest_Struct* bir = (BuyerInspectRequest_Struct*)app->pBuffer;
Client *Buyer = entity_list.GetClientByID(bir->BuyerID);
if (Buyer)
Buyer->WithCustomer(0);
break;
}
case Barter_BarterItemInspect:
{
BarterItemSearchLinkRequest_Struct* bislr = (BarterItemSearchLinkRequest_Struct*)app->pBuffer;
const EQEmu::ItemData* item = database.GetItem(bislr->ItemID);
if (!item)
Message(Chat::Red, "Error: This item does not exist!");
else
{
EQEmu::ItemInstance* inst = database.CreateItem(item);
if (inst)
{
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
}
}
break;
}
case Barter_Welcome:
{
SendBazaarWelcome();
break;
}
case Barter_WelcomeMessageUpdate:
{
BuyerWelcomeMessageUpdate_Struct* bwmu = (BuyerWelcomeMessageUpdate_Struct*)app->pBuffer;
SetBuyerWelcomeMessage(bwmu->WelcomeMessage);
break;
}
case Barter_BuyerItemInspect:
{
BuyerItemSearchLinkRequest_Struct* bislr = (BuyerItemSearchLinkRequest_Struct*)app->pBuffer;
const EQEmu::ItemData* item = database.GetItem(bislr->ItemID);
if (!item)
Message(Chat::Red, "Error: This item does not exist!");
else
{
EQEmu::ItemInstance* inst = database.CreateItem(item);
if (inst)
{
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
}
}
break;
}
case Barter_Unknown23:
{
// Sent by SoD client for no discernible reason.
break;
}
default:
Message(Chat::Red, "Unrecognised Barter action.");
LogTrading("Unrecognised Barter Action [{}]", Action);
}
}
void Client::Handle_OP_BazaarInspect(const EQApplicationPacket *app)
{
if (app->size != sizeof(BazaarInspect_Struct)) {
LogError("Invalid size for BazaarInspect_Struct: Expected [{}], Got [{}]",
sizeof(BazaarInspect_Struct), app->size);
return;
}
BazaarInspect_Struct* bis = (BazaarInspect_Struct*)app->pBuffer;
const EQEmu::ItemData* item = database.GetItem(bis->ItemID);
if (!item) {
Message(Chat::Red, "Error: This item does not exist!");
return;
}
EQEmu::ItemInstance* inst = database.CreateItem(item);
if (inst) {
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
}
return;
}
void Client::Handle_OP_BazaarSearch(const EQApplicationPacket *app)
{
if (app->size == sizeof(BazaarSearch_Struct)) {
BazaarSearch_Struct* bss = (BazaarSearch_Struct*)app->pBuffer;
this->SendBazaarResults(bss->TraderID, bss->Class_, bss->Race, bss->ItemStat, bss->Slot, bss->Type,
bss->Name, bss->MinPrice * 1000, bss->MaxPrice * 1000);
}
else if (app->size == sizeof(BazaarWelcome_Struct)) {
BazaarWelcome_Struct* bws = (BazaarWelcome_Struct*)app->pBuffer;
if (bws->Beginning.Action == BazaarWelcome)
SendBazaarWelcome();
}
else if (app->size == sizeof(NewBazaarInspect_Struct)) {
NewBazaarInspect_Struct *nbis = (NewBazaarInspect_Struct*)app->pBuffer;
Client *c = entity_list.GetClientByName(nbis->Name);
if (c) {
EQEmu::ItemInstance* inst = c->FindTraderItemBySerialNumber(nbis->SerialNumber);
if (inst)
SendItemPacket(0, inst, ItemPacketViewLink);
}
return;
}
else {
LogTrading("Malformed BazaarSearch_Struct packe, Action [{}]t received, ignoring");
LogError("Malformed BazaarSearch_Struct packet received, ignoring\n");
}
return;
}
void Client::Handle_OP_Begging(const EQApplicationPacket *app)
{
if (!p_timers.Expired(&database, pTimerBeggingPickPocket, false))
{
Message(Chat::Red, "Ability recovery time not yet met.");
auto outapp = new EQApplicationPacket(OP_Begging, sizeof(BeggingResponse_Struct));
BeggingResponse_Struct *brs = (BeggingResponse_Struct*)outapp->pBuffer;
brs->Result = 0;
FastQueuePacket(&outapp);
return;
}
if (!HasSkill(EQEmu::skills::SkillBegging) || !GetTarget())
return;
if (GetTarget()->GetClass() == LDON_TREASURE)
return;
p_timers.Start(pTimerBeggingPickPocket, 8);
auto outapp = new EQApplicationPacket(OP_Begging, sizeof(BeggingResponse_Struct));
BeggingResponse_Struct *brs = (BeggingResponse_Struct*)outapp->pBuffer;
brs->Result = 0; // Default, Fail.
if (GetTarget() == this)
{
FastQueuePacket(&outapp);
return;
}
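// A failed beg can provoke the target into attacking; the chance grows with the
// level gap between the target and the beggar.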
int RandomChance = zone->random.Int(0, 100);
int ChanceToAttack = 0;
if (GetLevel() > GetTarget()->GetLevel())
ChanceToAttack = zone->random.Int(0, 15);
else
ChanceToAttack = zone->random.Int(((this->GetTarget()->GetLevel() - this->GetLevel()) * 10) - 5, ((this->GetTarget()->GetLevel() - this->GetLevel()) * 10));
if (ChanceToAttack < 0)
ChanceToAttack = -ChanceToAttack;
if (RandomChance < ChanceToAttack)
{
GetTarget()->Attack(this);
QueuePacket(outapp);
safe_delete(outapp);
return;
}
uint16 CurrentSkill = GetSkill(EQEmu::skills::SkillBegging);
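// Beg success chance is a 15% base plus skill / 700 (so roughly +35% at a skill of 250).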
float ChanceToBeg = ((float)(CurrentSkill / 700.0f) + 0.15f) * 100;
if (RandomChance < ChanceToBeg)
{
brs->Amount = zone->random.Int(1, 10);
// This needs some work to determine how much money they can beg, based on skill level etc.
if (CurrentSkill < 50)
{
brs->Result = 4; // Copper
AddMoneyToPP(brs->Amount, false);
}
else
{
brs->Result = 3; // Silver
AddMoneyToPP(brs->Amount * 10, false);
}
}
QueuePacket(outapp);
safe_delete(outapp);
CheckIncreaseSkill(EQEmu::skills::SkillBegging, nullptr, -10);
}
void Client::Handle_OP_Bind_Wound(const EQApplicationPacket *app)
{
if (app->size != sizeof(BindWound_Struct)) {
LogError("Size mismatch for Bind wound packet");
DumpPacket(app);
return;
}
BindWound_Struct* bind_in = (BindWound_Struct*)app->pBuffer;
Mob* bindmob = entity_list.GetMob(bind_in->to);
if (!bindmob) {
LogError("Bindwound on non-exsistant mob from [{}]", this->GetName());
}
else {
LogDebug("BindWound in: to:\'[{}]\' from=\'[{}]\'", bindmob->GetName(), GetName());
BindWound(bindmob, true);
}
return;
}
void Client::Handle_OP_BlockedBuffs(const EQApplicationPacket *app)
{
if (!RuleB(Spells, EnableBlockedBuffs))
return;
if (app->size != sizeof(BlockedBuffs_Struct))
{
LogDebug("Size mismatch in OP_BlockedBuffs expected [{}] got [{}]", sizeof(BlockedBuffs_Struct), app->size);
DumpPacket(app);
return;
}
std::set<uint32>::iterator Iterator;
BlockedBuffs_Struct *bbs = (BlockedBuffs_Struct*)app->pBuffer;
std::set<uint32> *BlockedBuffs = bbs->Pet ? &PetBlockedBuffs : &PlayerBlockedBuffs;
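// Initialise == 1: the client is sending its full blocked-buff list (e.g. on zone-in);
// rebuild the server-side set and echo the accepted entries back.
// Initialise == 0 with Count > 0: the client is adding entries to the existing set.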
if (bbs->Initialise == 1)
{
BlockedBuffs->clear();
for (unsigned int i = 0; i < BLOCKED_BUFF_COUNT; ++i)
{
if ((IsValidSpell(bbs->SpellID[i])) && IsBeneficialSpell(bbs->SpellID[i]) && !spells[bbs->SpellID[i]].no_block)
{
if (BlockedBuffs->find(bbs->SpellID[i]) == BlockedBuffs->end())
BlockedBuffs->insert(bbs->SpellID[i]);
}
}
auto outapp = new EQApplicationPacket(OP_BlockedBuffs, sizeof(BlockedBuffs_Struct));
BlockedBuffs_Struct *obbs = (BlockedBuffs_Struct*)outapp->pBuffer;
for (unsigned int i = 0; i < BLOCKED_BUFF_COUNT; ++i)
obbs->SpellID[i] = -1;
obbs->Pet = bbs->Pet;
obbs->Initialise = 1;
obbs->Flags = 0x54;
obbs->Count = BlockedBuffs->size();
unsigned int Element = 0;
Iterator = BlockedBuffs->begin();
while (Iterator != BlockedBuffs->end())
{
obbs->SpellID[Element++] = (*Iterator);
++Iterator;
}
FastQueuePacket(&outapp);
return;
}
if ((bbs->Initialise == 0) && (bbs->Count > 0))
{
auto outapp = new EQApplicationPacket(OP_BlockedBuffs, sizeof(BlockedBuffs_Struct));
BlockedBuffs_Struct *obbs = (BlockedBuffs_Struct*)outapp->pBuffer;
for (unsigned int i = 0; i < BLOCKED_BUFF_COUNT; ++i)
obbs->SpellID[i] = -1;
obbs->Pet = bbs->Pet;
obbs->Initialise = 0;
obbs->Flags = 0x54;
for (unsigned int i = 0; i < BLOCKED_BUFF_COUNT; ++i)
{
if (!IsValidSpell(bbs->SpellID[i]) || !IsBeneficialSpell(bbs->SpellID[i]) || spells[bbs->SpellID[i]].no_block)
continue;
if ((BlockedBuffs->size() < BLOCKED_BUFF_COUNT) && (BlockedBuffs->find(bbs->SpellID[i]) == BlockedBuffs->end()))
BlockedBuffs->insert(bbs->SpellID[i]);
}
obbs->Count = BlockedBuffs->size();
Iterator = BlockedBuffs->begin();
unsigned int Element = 0;
while (Iterator != BlockedBuffs->end())
{
obbs->SpellID[Element++] = (*Iterator);
++Iterator;
}
FastQueuePacket(&outapp);
}
}
void Client::Handle_OP_BoardBoat(const EQApplicationPacket *app)
{
// this sends unclean mob name, so capped at 64
// a_boat006
if (app->size <= 5 || app->size > 64) {
LogError("Size mismatch in OP_BoardBoad. Expected greater than 5 less than 64, got [{}]", app->size);
DumpPacket(app);
return;
}
char boatname[64];
memcpy(boatname, app->pBuffer, app->size);
boatname[63] = '\0';
Mob* boat = entity_list.GetMob(boatname);
if (!boat || !boat->IsControllableBoat()) {
return;
}
controlling_boat_id = boat->GetID(); // set the client's BoatID to show that it's on this boat
Message(0, "Board boat: %s", boatname);
return;
}
void Client::Handle_OP_Buff(const EQApplicationPacket *app)
{
if (app->size != sizeof(SpellBuffPacket_Struct))
{
LogError("Size mismatch in OP_Buff. expected [{}] got [{}]", sizeof(SpellBuffPacket_Struct), app->size);
DumpPacket(app);
return;
}
SpellBuffPacket_Struct* sbf = (SpellBuffPacket_Struct*)app->pBuffer;
uint32 spid = sbf->buff.spellid;
LogSpells("Client requested that buff with spell id [{}] be canceled", spid);
//something about IsDetrimentalSpell() crashes this portion of code..
//tbh we shouldn't use it anyway since this is a simple red vs blue buff check and
//isdetrimentalspell() is much more complex
if (spid == 0xFFFF || (IsValidSpell(spid) && (spells[spid].goodEffect == 0)))
QueuePacket(app);
else
BuffFadeBySpellID(spid);
return;
}
void Client::Handle_OP_BuffRemoveRequest(const EQApplicationPacket *app)
{
// In SoD, this is used for clicking off Pet Buffs only. In Underfoot, it is used both for Client and Pets
// The payload contains buffslot and EntityID only, so we must check if the EntityID is ours or our pets.
//
VERIFY_PACKET_LENGTH(OP_BuffRemoveRequest, app, BuffRemoveRequest_Struct);
BuffRemoveRequest_Struct *brrs = (BuffRemoveRequest_Struct*)app->pBuffer;
Mob *m = nullptr;
if (brrs->EntityID == GetID()) {
m = this;
}
else if (brrs->EntityID == GetPetID()) {
m = GetPet();
}
#ifdef BOTS
else {
Mob* bot_test = entity_list.GetMob(brrs->EntityID);
if (bot_test && bot_test->IsBot() && bot_test->GetOwner() == this)
m = bot_test;
}
#endif
if (!m)
return;
if (brrs->SlotID > (uint32)m->GetMaxTotalSlots())
return;
uint16 SpellID = m->GetSpellIDFromSlot(brrs->SlotID);
if (SpellID && IsBeneficialSpell(SpellID) && !spells[SpellID].no_remove)
m->BuffFadeBySlot(brrs->SlotID, true);
}
void Client::Handle_OP_Bug(const EQApplicationPacket *app)
{
if (!RuleB(Bugs, ReportingSystemActive)) {
Message(0, "Bug reporting is disabled on this server.");
return;
}
if (app->size != sizeof(BugReport_Struct)) {
printf("Wrong size of BugReport_Struct got %d expected %zu!\n", app->size, sizeof(BugReport_Struct));
}
else {
BugReport_Struct* bug_report = (BugReport_Struct*)app->pBuffer;
if (RuleB(Bugs, UseOldReportingMethod))
database.RegisterBug(bug_report);
else
database.RegisterBug(this, bug_report);
}
return;
}
void Client::Handle_OP_Camp(const EQApplicationPacket *app)
{
#ifdef BOTS
// This block is necessary to clean up any bot objects owned by a Client
Bot::BotOrderCampAll(this);
// Evidently, this is bad under certain conditions and causes crashes...
// Group and Raid code really needs to be overhauled to account for non-client types (mercs and bots)
//auto group = GetGroup();
//if (group && group->GroupCount() < 2)
// group->DisbandGroup();
#endif
if (IsLFP())
worldserver.StopLFP(CharacterID());
if (GetGM())
{
OnDisconnect(true);
return;
}
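// Non-GM clients must wait out the ~29 second camp timer before the disconnect completes.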
camp_timer.Start(29000, true);
return;
}
void Client::Handle_OP_CancelTask(const EQApplicationPacket *app)
{
if (app->size != sizeof(CancelTask_Struct)) {
LogDebug("Size mismatch in OP_CancelTask expected [{}] got [{}]", sizeof(CancelTask_Struct), app->size);
DumpPacket(app);
return;
}
CancelTask_Struct *cts = (CancelTask_Struct*)app->pBuffer;
if (RuleB(TaskSystem, EnableTaskSystem) && taskstate)
taskstate->CancelTask(this, cts->SequenceNumber, static_cast<TaskType>(cts->type));
}
void Client::Handle_OP_CancelTrade(const EQApplicationPacket *app)
{
if (app->size != sizeof(CancelTrade_Struct)) {
LogError("Wrong size: OP_CancelTrade, size=[{}], expected [{}]", app->size, sizeof(CancelTrade_Struct));
return;
}
Mob* with = trade->With();
if (with && with->IsClient()) {
CancelTrade_Struct* msg = (CancelTrade_Struct*)app->pBuffer;
// Forward cancel packet to other client
msg->fromid = with->GetID();
//msg->action = 1;
with->CastToClient()->QueuePacket(app);
// Put trade items/cash back into inventory
FinishTrade(this);
trade->Reset();
}
else if (with) {
CancelTrade_Struct* msg = (CancelTrade_Struct*)app->pBuffer;
msg->fromid = with->GetID();
QueuePacket(app);
FinishTrade(this);
trade->Reset();
}
EQApplicationPacket end_trade1(OP_FinishWindow, 0);
QueuePacket(&end_trade1);
EQApplicationPacket end_trade2(OP_FinishWindow2, 0);
QueuePacket(&end_trade2);
return;
}
void Client::Handle_OP_CastSpell(const EQApplicationPacket *app)
{
using EQEmu::spells::CastingSlot;
if (app->size != sizeof(CastSpell_Struct)) {
std::cout << "Wrong size: OP_CastSpell, size=" << app->size << ", expected " << sizeof(CastSpell_Struct) << std::endl;
return;
}
if (IsAIControlled()) {
this->MessageString(Chat::Red, NOT_IN_CONTROL);
//Message(Chat::Red, "You cant cast right now, you arent in control of yourself!");
return;
}
CastSpell_Struct* castspell = (CastSpell_Struct*)app->pBuffer;
m_TargetRing = glm::vec3(castspell->x_pos, castspell->y_pos, castspell->z_pos);
LogSpells("OP CastSpell: slot [{}] spell [{}] target [{}] inv [{}]", castspell->slot, castspell->spell_id, castspell->target_id, (unsigned long)castspell->inventoryslot);
CastingSlot slot = static_cast<CastingSlot>(castspell->slot);
/* Memorized Spell */
if (m_pp.mem_spells[castspell->slot] && m_pp.mem_spells[castspell->slot] == castspell->spell_id) {
uint16 spell_to_cast = 0;
if (castspell->slot < EQEmu::spells::SPELL_GEM_COUNT) {
spell_to_cast = m_pp.mem_spells[castspell->slot];
if (spell_to_cast != castspell->spell_id) {
InterruptSpell(castspell->spell_id); //CHEATER!!!
return;
}
}
else if (castspell->slot >= EQEmu::spells::SPELL_GEM_COUNT) {
InterruptSpell();
return;
}
CastSpell(spell_to_cast, castspell->target_id, slot);
}
/* Spell Slot or Potion Belt Slot */
else if (slot == CastingSlot::Item || slot == CastingSlot::PotionBelt) // ITEM or POTION cast
{
if (m_inv.SupportsClickCasting(castspell->inventoryslot) || slot == CastingSlot::PotionBelt) // sanity check
{
// packet field types will be reviewed as packet transitions occur
const EQEmu::ItemInstance* inst = m_inv[castspell->inventoryslot]; //slot values are int16, need to check packet on this field
//bool cancast = true;
if (inst && inst->IsClassCommon())
{
const EQEmu::ItemData* item = inst->GetItem();
if (item->Click.Effect != (uint32)castspell->spell_id)
{
database.SetMQDetectionFlag(account_name, name, "OP_CastSpell with item, tried to cast a different spell.", zone->GetShortName());
InterruptSpell(castspell->spell_id); //CHEATER!!
return;
}
if ((item->Click.Type == EQEmu::item::ItemEffectClick) || (item->Click.Type == EQEmu::item::ItemEffectExpendable) || (item->Click.Type == EQEmu::item::ItemEffectEquipClick) || (item->Click.Type == EQEmu::item::ItemEffectClick2))
{
if (item->Click.Level2 > 0)
{
if (GetLevel() >= item->Click.Level2)
{
EQEmu::ItemInstance* p_inst = (EQEmu::ItemInstance*)inst;
int i = parse->EventItem(EVENT_ITEM_CLICK_CAST, this, p_inst, nullptr, "", castspell->inventoryslot);
if (i == 0) {
CastSpell(item->Click.Effect, castspell->target_id, slot, item->CastTime, 0, 0, castspell->inventoryslot);
}
else {
InterruptSpell(castspell->spell_id);
return;
}
}
else
{
database.SetMQDetectionFlag(account_name, name, "OP_CastSpell with item, did not meet req level.", zone->GetShortName());
Message(0, "Error: level not high enough.", castspell->inventoryslot);
InterruptSpell(castspell->spell_id);
}
}
else
{
EQEmu::ItemInstance* p_inst = (EQEmu::ItemInstance*)inst;
int i = parse->EventItem(EVENT_ITEM_CLICK_CAST, this, p_inst, nullptr, "", castspell->inventoryslot);
if (i == 0) {
CastSpell(item->Click.Effect, castspell->target_id, slot, item->CastTime, 0, 0, castspell->inventoryslot);
}
else {
InterruptSpell(castspell->spell_id);
return;
}
}
}
else
{
Message(0, "Error: unknown item->Click.Type (0x%02x)", item->Click.Type);
}
}
else
{
Message(0, "Error: item not found in inventory slot #%i", castspell->inventoryslot);
InterruptSpell(castspell->spell_id);
}
}
else
{
Message(0, "Error: castspell->inventoryslot >= %i (0x%04x)", EQEmu::invslot::slotCursor, castspell->inventoryslot);
InterruptSpell(castspell->spell_id);
}
}
/* Discipline -- older clients use the same slot as items, but we translate it to its own */
else if (slot == CastingSlot::Discipline) {
if (!UseDiscipline(castspell->spell_id, castspell->target_id)) {
LogSpells("Unknown ability being used by [{}], spell being cast is: [{}]\n", GetName(), castspell->spell_id);
InterruptSpell(castspell->spell_id);
return;
}
}
/* ABILITY cast (LoH and Harm Touch) */
else if (slot == CastingSlot::Ability) {
uint16 spell_to_cast = 0;
if (castspell->spell_id == SPELL_LAY_ON_HANDS && GetClass() == PALADIN) {
if (!p_timers.Expired(&database, pTimerLayHands)) {
Message(Chat::Red, "Ability recovery time not yet met.");
InterruptSpell(castspell->spell_id);
return;
}
spell_to_cast = SPELL_LAY_ON_HANDS;
p_timers.Start(pTimerLayHands, LayOnHandsReuseTime);
}
else if ((castspell->spell_id == SPELL_HARM_TOUCH
|| castspell->spell_id == SPELL_HARM_TOUCH2) && GetClass() == SHADOWKNIGHT) {
if (!p_timers.Expired(&database, pTimerHarmTouch)) {
Message(Chat::Red, "Ability recovery time not yet met.");
InterruptSpell(castspell->spell_id);
return;
}
// determine which version of HT we are casting based on level
if (GetLevel() < 40)
spell_to_cast = SPELL_HARM_TOUCH;
else
spell_to_cast = SPELL_HARM_TOUCH2;
p_timers.Start(pTimerHarmTouch, HarmTouchReuseTime);
}
if (spell_to_cast > 0) // if we've matched LoH or HT, cast now
CastSpell(spell_to_cast, castspell->target_id, slot);
}
return;
}
void Client::Handle_OP_ChannelMessage(const EQApplicationPacket *app)
{
ChannelMessage_Struct* cm = (ChannelMessage_Struct*)app->pBuffer;
if (app->size < sizeof(ChannelMessage_Struct)) {
std::cout << "Wrong size " << app->size << ", should be " << sizeof(ChannelMessage_Struct) << "+ on 0x" << std::hex << std::setfill('0') << std::setw(4) << app->GetOpcode() << std::dec << std::endl;
return;
}
if (IsAIControlled()) {
Message(Chat::Red, "You try to speak but cant move your mouth!");
return;
}
uint8 skill_in_language = 100;
if (cm->language < MAX_PP_LANGUAGE)
{
skill_in_language = m_pp.languages[cm->language];
}
ChannelMessageReceived(cm->chan_num, cm->language, skill_in_language, cm->message, cm->targetname);
return;
}
void Client::Handle_OP_ClearBlockedBuffs(const EQApplicationPacket *app)
{
if (!RuleB(Spells, EnableBlockedBuffs))
return;
if (app->size != 1)
{
LogDebug("Size mismatch in OP_ClearBlockedBuffs expected 1 got [{}]", app->size);
DumpPacket(app);
return;
}
bool Pet = app->pBuffer[0];
if (Pet)
PetBlockedBuffs.clear();
else
PlayerBlockedBuffs.clear();
QueuePacket(app);
}
void Client::Handle_OP_ClearNPCMarks(const EQApplicationPacket *app)
{
if (app->size != 0)
{
LogDebug("Size mismatch in OP_ClearNPCMarks expected 0 got [{}]", app->size);
DumpPacket(app);
return;
}
Group *g = GetGroup();
if (g)
g->ClearAllNPCMarks();
}
void Client::Handle_OP_ClearSurname(const EQApplicationPacket *app)
{
ChangeLastName("");
}
void Client::Handle_OP_ClickDoor(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClickDoor_Struct)) {
LogError("Wrong size: OP_ClickDoor, size=[{}], expected [{}]", app->size, sizeof(ClickDoor_Struct));
return;
}
ClickDoor_Struct* cd = (ClickDoor_Struct*)app->pBuffer;
Doors* currentdoor = entity_list.FindDoor(cd->doorid);
if (!currentdoor)
{
Message(0, "Unable to find door, please notify a GM (DoorID: %i).", cd->doorid);
return;
}
char buf[20];
snprintf(buf, 19, "%u", cd->doorid);
buf[19] = '\0';
std::vector<EQEmu::Any> args;
args.push_back(currentdoor);
parse->EventPlayer(EVENT_CLICK_DOOR, this, buf, 0, &args);
currentdoor->HandleClick(this, 0);
return;
}
void Client::Handle_OP_ClickObject(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClickObject_Struct)) {
LogError("Invalid size on ClickObject_Struct: Expected [{}], Got [{}]",
sizeof(ClickObject_Struct), app->size);
return;
}
ClickObject_Struct* click_object = (ClickObject_Struct*)app->pBuffer;
Entity* entity = entity_list.GetID(click_object->drop_id);
if (entity && entity->IsObject()) {
Object* object = entity->CastToObject();
object->HandleClick(this, click_object);
std::vector<EQEmu::Any> args;
args.push_back(object);
char buf[10];
snprintf(buf, 9, "%u", click_object->drop_id);
buf[9] = '\0';
parse->EventPlayer(EVENT_CLICK_OBJECT, this, buf, GetID(), &args);
}
// Observed in RoF after OP_ClickObjectAction:
//EQApplicationPacket end_trade2(OP_FinishWindow2, 0);
//QueuePacket(&end_trade2);
return;
}
void Client::Handle_OP_ClickObjectAction(const EQApplicationPacket *app)
{
if (app->size == 0) {
// RoF sends this packet 0 sized when switching from auto-combine to experiment windows.
// Not completely sure if 0 sized is for this or for closing objects as commented out below
EQApplicationPacket end_trade1(OP_FinishWindow, 0);
QueuePacket(&end_trade1);
EQApplicationPacket end_trade2(OP_FinishWindow2, 0);
QueuePacket(&end_trade2);
// RoF sends a 0 sized packet for closing objects
if (GetTradeskillObject() && ClientVersion() >= EQEmu::versions::ClientVersion::RoF)
GetTradeskillObject()->CastToObject()->Close();
return;
}
else
{
if (app->size != sizeof(ClickObjectAction_Struct)) {
LogError("Invalid size on OP_ClickObjectAction: Expected [{}], Got [{}]",
sizeof(ClickObjectAction_Struct), app->size);
return;
}
ClickObjectAction_Struct* oos = (ClickObjectAction_Struct*)app->pBuffer;
Entity* entity = entity_list.GetEntityObject(oos->drop_id);
if (entity && entity->IsObject()) {
Object* object = entity->CastToObject();
if (oos->open == 0) {
object->Close();
}
else {
LogError("Unsupported action [{}] in OP_ClickObjectAction", oos->open);
}
}
else {
LogError("Invalid object [{}] in OP_ClickObjectAction", oos->drop_id);
}
}
SetTradeskillObject(nullptr);
EQApplicationPacket end_trade1(OP_FinishWindow, 0);
QueuePacket(&end_trade1);
EQApplicationPacket end_trade2(OP_FinishWindow2, 0);
QueuePacket(&end_trade2);
return;
}
void Client::Handle_OP_ClientError(const EQApplicationPacket *app)
{
ClientError_Struct* error = (ClientError_Struct*)app->pBuffer;
LogError("Client error: [{}]", error->character_name);
LogError("Error message:[{}]", error->message);
return;
}
void Client::Handle_OP_ClientTimeStamp(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_ClientUpdate(const EQApplicationPacket *app) {
if (IsAIControlled())
return;
if (dead)
return;
/* Invalid size check */
if (app->size != sizeof(PlayerPositionUpdateClient_Struct)
&& app->size != (sizeof(PlayerPositionUpdateClient_Struct) + 1)
) {
LogError("OP size error: OP_ClientUpdate expected:[{}] got:[{}]",
sizeof(PlayerPositionUpdateClient_Struct), app->size);
return;
}
PlayerPositionUpdateClient_Struct *ppu = (PlayerPositionUpdateClient_Struct *) app->pBuffer;
/* Boat handling */
if (ppu->spawn_id != GetID()) {
/* If player is controlling boat */
if (ppu->spawn_id && ppu->spawn_id == controlling_boat_id) {
Mob *boat = entity_list.GetMob(controlling_boat_id);
if (boat == 0) {
controlling_boat_id = 0;
return;
}
auto boat_delta = glm::vec4(ppu->delta_x, ppu->delta_y, ppu->delta_z, EQ10toFloat(ppu->delta_heading));
boat->SetDelta(boat_delta);
auto outapp = new EQApplicationPacket(OP_ClientUpdate, sizeof(PlayerPositionUpdateServer_Struct));
PlayerPositionUpdateServer_Struct *ppus = (PlayerPositionUpdateServer_Struct *) outapp->pBuffer;
boat->MakeSpawnUpdate(ppus);
entity_list.QueueCloseClients(boat, outapp, true, 300, this, false);
safe_delete(outapp);
/* Update the boat's position on the server, without sending an update */
boat->GMMove(ppu->x_pos, ppu->y_pos, ppu->z_pos, EQ12toFloat(ppu->heading), false);
return;
}
else {
// Eye of Zomm needs code here to track position of the eye on server
// so that other clients see it. I could add a check here for eye of zomm
// race, to limit this code, but this should handle any client controlled
// mob that gets updates from OP_ClientUpdate
if (ppu->spawn_id == controlled_mob_id) {
Mob *cmob = entity_list.GetMob(ppu->spawn_id);
if (cmob != nullptr) {
cmob->SetPosition(ppu->x_pos, ppu->y_pos, ppu->z_pos);
cmob->SetHeading(EQ12toFloat(ppu->heading));
mMovementManager->SendCommandToClients(cmob, 0.0, 0.0, 0.0,
0.0, 0, ClientRangeAny, nullptr, this);
cmob->CastToNPC()->SaveGuardSpot(glm::vec4(ppu->x_pos,
ppu->y_pos, ppu->z_pos, EQ12toFloat(ppu->heading)));
}
}
}
return;
}
// At this point, all that's left is a client update.
// Pure boat updates and client-controlled mob updates are complete.
// This can still be tricky. If ppu->vehicle_id is set, then the client
// position is actually an offset from the boat he is inside.
bool on_boat = (ppu->vehicle_id != 0);
// From this point forward, we need to use a new set of variables for client
// position. If the client is in a boat, we need to add the boat pos and
// the client offset together.
float cx = ppu->x_pos;
float cy = ppu->y_pos;
float cz = ppu->z_pos;
float new_heading = EQ12toFloat(ppu->heading);
if (on_boat) {
Mob *boat = entity_list.GetMob(ppu->vehicle_id);
if (boat == 0) {
LogError("Can't find boat for client position offset.");
}
else {
cx += boat->GetX();
cy += boat->GetY();
cz += boat->GetZ();
new_heading += boat->GetHeading();
}
}
if (IsDraggingCorpse())
DragCorpses();
/* Check to see if PPU should trigger an update to the rewind position. */
float rewind_x_diff = 0;
float rewind_y_diff = 0;
rewind_x_diff = cx - m_RewindLocation.x;
rewind_x_diff *= rewind_x_diff;
rewind_y_diff = cy - m_RewindLocation.y;
rewind_y_diff *= rewind_y_diff;
/*
We only need to store updated values if the player has moved.
If the squared x or y delta exceeds 750, then we'll store
his pre-PPU x and y for /rewind, in case he gets stuck.
*/
if ((rewind_x_diff > 750) || (rewind_y_diff > 750))
m_RewindLocation = glm::vec3(m_Position);
/*
If the PPU was a large jump, such as a cross zone gate or Call of Hero,
just update rewind coordinates to the new ppu coordinates. This will prevent exploitation.
*/
if ((rewind_x_diff > 5000) || (rewind_y_diff > 5000))
m_RewindLocation = glm::vec3(cx, cy, cz);
if (proximity_timer.Check()) {
entity_list.ProcessMove(this, glm::vec3(cx, cy, cz));
if (RuleB(TaskSystem, EnableTaskSystem) && RuleB(TaskSystem, EnableTaskProximity))
ProcessTaskProximities(cx, cy, cz);
m_Proximity = glm::vec3(cx, cy, cz);
}
/* Update internal state */
m_Delta = glm::vec4(ppu->delta_x, ppu->delta_y, ppu->delta_z, EQ10toFloat(ppu->delta_heading));
if (IsTracking() && ((m_Position.x != cx) || (m_Position.y != cy))) {
if (zone->random.Real(0, 100) < 70)//should be good
CheckIncreaseSkill(EQEmu::skills::SkillTracking, nullptr, -20);
}
/* Break Hide if moving without sneaking and set rewind timer if moved */
if (cy != m_Position.y || cx != m_Position.x) {
if ((hidden || improved_hidden) && !sneaking) {
hidden = false;
improved_hidden = false;
if (!invisible) {
auto outapp =
new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
SpawnAppearance_Struct *sa_out = (SpawnAppearance_Struct *) outapp->pBuffer;
sa_out->spawn_id = GetID();
sa_out->type = 0x03;
sa_out->parameter = 0;
entity_list.QueueClients(this, outapp, true);
safe_delete(outapp);
}
}
rewind_timer.Start(30000, true);
}
/* Handle client aggro scanning timers NPCs */
is_client_moving = (cy != m_Position.y || cx != m_Position.x);
if (is_client_moving) {
LogDebug("ClientUpdate: Client is moving - scan timer is: [{}]", client_scan_npc_aggro_timer.GetDuration());
if (client_scan_npc_aggro_timer.GetDuration() > 1000) {
client_scan_npc_aggro_timer.Disable();
client_scan_npc_aggro_timer.Start(500);
}
}
else {
LogDebug("ClientUpdate: Client is NOT moving - scan timer is: [{}]", client_scan_npc_aggro_timer.GetDuration());
if (client_scan_npc_aggro_timer.GetDuration() < 1000) {
client_scan_npc_aggro_timer.Disable();
client_scan_npc_aggro_timer.Start(3000);
}
}
/**
* On a normal basis we limit mob movement updates based on distance
* This ensures we send a periodic full zone update to a client that has started moving after 5 or so minutes
*
* For very large zones we will also force a full update based on distance
*
* We ignore a small distance around us so that we don't interrupt already pathing deltas as those npcs will appear
* to full stop when they are actually still pathing
*/
float distance_moved = DistanceNoZ(GetLastPositionBeforeBulkUpdate(), GetPosition());
bool moved_far_enough_before_bulk_update = distance_moved >= zone->GetNpcPositionUpdateDistance();
bool is_ready_to_update = (
client_zone_wide_full_position_update_timer.Check() || moved_far_enough_before_bulk_update
);
if (is_client_moving && is_ready_to_update) {
LogDebug("[[{}]] Client Zone Wide Position Update NPCs", GetCleanName());
auto &mob_movement_manager = MobMovementManager::Get();
auto &mob_list = entity_list.GetMobList();
for (auto &it : mob_list) {
Mob *entity = it.second;
if (!entity->IsNPC()) {
continue;
}
int animation_speed = 0;
if (entity->IsMoving()) {
if (entity->IsRunning()) {
animation_speed = (entity->IsFeared() ? entity->GetFearSpeed() : entity->GetRunspeed());
}
else {
animation_speed = entity->GetWalkspeed();
}
}
mob_movement_manager.SendCommandToClients(entity, 0.0, 0.0, 0.0, 0.0, animation_speed, ClientRangeAny, this);
}
SetLastPositionBeforeBulkUpdate(GetPosition());
}
int32 new_animation = ppu->animation;
/* Update internal server position from what the client has sent */
m_Position.x = cx;
m_Position.y = cy;
m_Position.z = cz;
/* Visual Debugging */
if (RuleB(Character, OPClientUpdateVisualDebug)) {
LogDebug("ClientUpdate: ppu x: [{}] y: [{}] z: [{}] h: [{}]", cx, cy, cz, new_heading);
this->SendAppearanceEffect(78, 0, 0, 0, 0);
this->SendAppearanceEffect(41, 0, 0, 0, 0);
}
/* Only feed real time updates when client is moving */
if (is_client_moving || new_heading != m_Position.w || new_animation != animation) {
animation = ppu->animation;
m_Position.w = new_heading;
/* Broadcast update to other clients */
auto outapp = new EQApplicationPacket(OP_ClientUpdate, sizeof(PlayerPositionUpdateServer_Struct));
PlayerPositionUpdateServer_Struct *position_update = (PlayerPositionUpdateServer_Struct *) outapp->pBuffer;
MakeSpawnUpdate(position_update);
if (gm_hide_me) {
entity_list.QueueClientsStatus(this, outapp, true, Admin(), 255);
} else {
entity_list.QueueCloseClients(this, outapp, true, RuleI(Range, ClientPositionUpdates), nullptr, true);
}
/* Always send position updates to group - send when beyond normal ClientPositionUpdate range */
Group *group = this->GetGroup();
Raid *raid = this->GetRaid();
if (raid) {
raid->QueueClients(this, outapp, true, true, (RuleI(Range, ClientPositionUpdates) * -1));
} else if (group) {
group->QueueClients(this, outapp, true, true, (RuleI(Range, ClientPositionUpdates) * -1));
}
safe_delete(outapp);
}
if (zone->watermap) {
if (zone->watermap->InLiquid(glm::vec3(m_Position))) {
CheckIncreaseSkill(EQEmu::skills::SkillSwimming, nullptr, -17);
// Dismount horses when entering water
if (GetHorseId() && RuleB(Character, DismountWater)) {
SetHorseId(0);
BuffFadeByEffect(SE_SummonHorse);
}
}
CheckRegionTypeChanges();
}
}
void Client::Handle_OP_CombatAbility(const EQApplicationPacket *app)
{
if (app->size != sizeof(CombatAbility_Struct)) {
std::cout << "Wrong size on OP_CombatAbility. Got: " << app->size << ", Expected: " << sizeof(CombatAbility_Struct) << std::endl;
return;
}
auto ca_atk = (CombatAbility_Struct *)app->pBuffer;
OPCombatAbility(ca_atk);
return;
}
void Client::Handle_OP_ConfirmDelete(const EQApplicationPacket* app)
{
return;
}
void Client::Handle_OP_Consent(const EQApplicationPacket *app)
{
if (app->size<64) {
Consent_Struct* c = (Consent_Struct*)app->pBuffer;
ConsentCorpses(c->name, false);
}
}
void Client::Handle_OP_ConsentDeny(const EQApplicationPacket *app)
{
if (app->size<64) {
Consent_Struct* c = (Consent_Struct*)app->pBuffer;
ConsentCorpses(c->name, true);
}
}
void Client::Handle_OP_Consider(const EQApplicationPacket *app)
{
if (app->size != sizeof(Consider_Struct))
{
LogDebug("Size mismatch in Consider expected [{}] got [{}]", sizeof(Consider_Struct), app->size);
return;
}
Consider_Struct* conin = (Consider_Struct*)app->pBuffer;
Mob* tmob = entity_list.GetMob(conin->targetid);
if (tmob == 0)
return;
if (tmob->GetClass() == LDON_TREASURE)
{
Message(Chat::Yellow, "%s", tmob->GetCleanName());
return;
}
auto outapp = new EQApplicationPacket(OP_Consider, sizeof(Consider_Struct));
Consider_Struct* con = (Consider_Struct*)outapp->pBuffer;
con->playerid = GetID();
con->targetid = conin->targetid;
if (tmob->IsNPC())
con->faction = GetFactionLevel(character_id, tmob->GetNPCTypeID(), GetFactionRace(), class_, deity, (tmob->IsNPC()) ? tmob->CastToNPC()->GetPrimaryFaction() : 0, tmob); // Dec. 20, 2001; TODO: Send the player's proper deity
else
con->faction = 1;
con->level = GetLevelCon(tmob->GetLevel());
if (ClientVersion() <= EQEmu::versions::ClientVersion::Titanium) {
if (con->level == CON_GRAY) {
con->level = CON_GREEN;
}
if (con->level == CON_WHITE) {
con->level = CON_WHITE_TITANIUM;
}
}
if (zone->IsPVPZone()) {
if (!tmob->IsNPC())
con->pvpcon = tmob->CastToClient()->GetPVP();
}
// If we're feigned show NPC as indifferent
if (tmob->IsNPC())
{
if (GetFeigned())
con->faction = FACTION_INDIFFERENT;
}
if (con->faction != FACTION_SCOWLS)
{
if (tmob->IsNPC())
{
if (tmob->CastToNPC()->IsOnHatelist(this))
con->faction = FACTION_THREATENLY;
}
}
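// Swap apprehensive<->scowls and dubious<->threatenly; the client evidently interprets these
// con values in a different order than the server-side faction constants.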
if (con->faction == FACTION_APPREHENSIVE) {
con->faction = FACTION_SCOWLS;
}
else if (con->faction == FACTION_DUBIOUS) {
con->faction = FACTION_THREATENLY;
}
else if (con->faction == FACTION_SCOWLS) {
con->faction = FACTION_APPREHENSIVE;
}
else if (con->faction == FACTION_THREATENLY) {
con->faction = FACTION_DUBIOUS;
}
mod_consider(tmob, con);
QueuePacket(outapp);
// only wanted to check raid target once
// and need con to still be around so, do it here!
if (tmob->IsRaidTarget()) {
uint32 color = 0;
switch (con->level) {
case CON_GREEN:
color = 2;
break;
case CON_LIGHTBLUE:
color = 10;
break;
case CON_BLUE:
color = 4;
break;
case CON_WHITE_TITANIUM:
case CON_WHITE:
color = 10;
break;
case CON_YELLOW:
color = 15;
break;
case CON_RED:
color = 13;
break;
case CON_GRAY:
color = 6;
break;
}
if (ClientVersion() <= EQEmu::versions::ClientVersion::Titanium) {
if (color == 6) {
color = 2;
}
}
SendColoredText(color, std::string("This creature would take an army to defeat!"));
}
// this could be done better, but this is only called when you con so w/e
// Shroud of Stealth has a special message
if (improved_hidden && (!tmob->see_improved_hide && (tmob->see_invis || tmob->see_hide)))
MessageString(Chat::NPCQuestSay, SOS_KEEPS_HIDDEN);
// we are trying to hide but they can see us
else if ((invisible || invisible_undead || hidden || invisible_animals) && !IsInvisible(tmob))
MessageString(Chat::NPCQuestSay, SUSPECT_SEES_YOU);
safe_delete(outapp);
return;
}
void Client::Handle_OP_ConsiderCorpse(const EQApplicationPacket *app)
{
if (app->size != sizeof(Consider_Struct))
{
LogDebug("Size mismatch in Consider corpse expected [{}] got [{}]", sizeof(Consider_Struct), app->size);
return;
}
Consider_Struct* conin = (Consider_Struct*)app->pBuffer;
Corpse* tcorpse = entity_list.GetCorpseByID(conin->targetid);
if (tcorpse && tcorpse->IsNPCCorpse()) {
uint32 min; uint32 sec; uint32 ttime;
if ((ttime = tcorpse->GetDecayTime()) != 0) {
sec = (ttime / 1000) % 60; // Total seconds
min = (ttime / 60000) % 60; // Total seconds / 60 drop .00
char val1[20] = { 0 };
char val2[20] = { 0 };
MessageString(Chat::NPCQuestSay, CORPSE_DECAY1, ConvertArray(min, val1), ConvertArray(sec, val2));
}
else {
MessageString(Chat::NPCQuestSay, CORPSE_DECAY_NOW);
}
}
else if (tcorpse && tcorpse->IsPlayerCorpse()) {
uint32 day, hour, min, sec, ttime;
if ((ttime = tcorpse->GetDecayTime()) != 0) {
sec = (ttime / 1000) % 60; // Total seconds
min = (ttime / 60000) % 60; // Total seconds
hour = (ttime / 3600000) % 24; // Total hours
day = ttime / 86400000; // Total Days
if (day)
Message(0, "This corpse will decay in %i days, %i hours, %i minutes and %i seconds.", day, hour, min, sec);
else if (hour)
Message(0, "This corpse will decay in %i hours, %i minutes and %i seconds.", hour, min, sec);
else
Message(0, "This corpse will decay in %i minutes and %i seconds.", min, sec);
Message(0, "This corpse %s be resurrected.", tcorpse->IsRezzed() ? "cannot" : "can");
/*
hour = 0;
if((ttime = tcorpse->GetResTime()) != 0) {
sec = (ttime/1000)%60; // Total seconds
min = (ttime/60000)%60; // Total seconds
hour = (ttime/3600000)%24; // Total hours
if(hour)
Message(0, "This corpse can be resurrected for %i hours, %i minutes and %i seconds.", hour, min, sec);
else
Message(0, "This corpse can be resurrected for %i minutes and %i seconds.", min, sec);
}
else {
MessageString(Chat::White, CORPSE_TOO_OLD);
}
*/
}
else {
MessageString(Chat::NPCQuestSay, CORPSE_DECAY_NOW);
}
}
}
void Client::Handle_OP_Consume(const EQApplicationPacket *app)
{
if (app->size != sizeof(Consume_Struct))
{
LogError("OP size error: OP_Consume expected:[{}] got:[{}]", sizeof(Consume_Struct), app->size);
return;
}
Consume_Struct* pcs = (Consume_Struct*)app->pBuffer;
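// type 0x01 = food, 0x02 = drink; if the corresponding level is already capped (over 6000),
// just resend the current stamina values and skip consuming the item.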
if (pcs->type == 0x01)
{
if (m_pp.hunger_level > 6000)
{
EQApplicationPacket *outapp = nullptr;
outapp = new EQApplicationPacket(OP_Stamina, sizeof(Stamina_Struct));
Stamina_Struct* sta = (Stamina_Struct*)outapp->pBuffer;
sta->food = m_pp.hunger_level > 6000 ? 6000 : m_pp.hunger_level;
sta->water = m_pp.thirst_level > 6000 ? 6000 : m_pp.thirst_level;
QueuePacket(outapp);
safe_delete(outapp);
return;
}
}
else if (pcs->type == 0x02)
{
if (m_pp.thirst_level > 6000)
{
EQApplicationPacket *outapp = nullptr;
outapp = new EQApplicationPacket(OP_Stamina, sizeof(Stamina_Struct));
Stamina_Struct* sta = (Stamina_Struct*)outapp->pBuffer;
sta->food = m_pp.hunger_level > 6000 ? 6000 : m_pp.hunger_level;
sta->water = m_pp.thirst_level > 6000 ? 6000 : m_pp.thirst_level;
QueuePacket(outapp);
safe_delete(outapp);
return;
}
}
EQEmu::ItemInstance *myitem = GetInv().GetItem(pcs->slot);
if (myitem == nullptr) {
LogError("Consuming from empty slot [{}]", pcs->slot);
return;
}
const EQEmu::ItemData* eat_item = myitem->GetItem();
if (pcs->type == 0x01) {
Consume(eat_item, EQEmu::item::ItemTypeFood, pcs->slot, (pcs->auto_consumed == 0xffffffff));
}
else if (pcs->type == 0x02) {
Consume(eat_item, EQEmu::item::ItemTypeDrink, pcs->slot, (pcs->auto_consumed == 0xffffffff));
}
else {
LogError("OP_Consume: unknown type, type:[{}]", (int)pcs->type);
return;
}
if (m_pp.hunger_level > 50000)
m_pp.hunger_level = 50000;
if (m_pp.thirst_level > 50000)
m_pp.thirst_level = 50000;
EQApplicationPacket *outapp = nullptr;
outapp = new EQApplicationPacket(OP_Stamina, sizeof(Stamina_Struct));
Stamina_Struct* sta = (Stamina_Struct*)outapp->pBuffer;
sta->food = m_pp.hunger_level > 6000 ? 6000 : m_pp.hunger_level;
sta->water = m_pp.thirst_level > 6000 ? 6000 : m_pp.thirst_level;
QueuePacket(outapp);
safe_delete(outapp);
return;
}
void Client::Handle_OP_ControlBoat(const EQApplicationPacket *app)
{
if (app->size != sizeof(ControlBoat_Struct)) {
LogError("Wrong size: OP_ControlBoat, size=[{}], expected [{}]", app->size, sizeof(ControlBoat_Struct));
return;
}
ControlBoat_Struct* cbs = (ControlBoat_Struct*)app->pBuffer;
Mob* boat = entity_list.GetMob(cbs->boatId);
if (!boat) {
LogError("Player tried to take control of non-existent boat (char_id: %u, boat_eid: %u)", CharacterID(), cbs->boatId);
return; // do nothing if the boat isn't valid
}
if (!boat->IsNPC() || !boat->IsControllableBoat())
{
char *hacked_string = nullptr;
MakeAnyLenString(&hacked_string, "OP_Control Boat was sent against %s which is of race %u", boat->GetName(), boat->GetRace());
database.SetMQDetectionFlag(this->AccountName(), this->GetName(), hacked_string, zone->GetShortName());
safe_delete_array(hacked_string);
return;
}
if (cbs->TakeControl) {
// this uses the boat's target to indicate who has control of it. It has to check hate to make sure the boat isn't actually attacking anyone.
if (!boat->GetTarget() || (boat->GetTarget() == this && boat->GetHateAmount(this) == 0)) {
boat->SetTarget(this);
}
else {
this->MessageString(Chat::Red, IN_USE);
return;
}
}
else {
if (boat->GetTarget() == this) {
boat->SetTarget(nullptr);
}
}
// client responds better to a packet echo than an empty op
QueuePacket(app);
// have the boat signal itself, so quests can be triggered by boat use
boat->CastToNPC()->SignalNPC(0);
}
void Client::Handle_OP_CorpseDrag(const EQApplicationPacket *app)
{
if (DraggedCorpses.size() >= (unsigned int)RuleI(Character, MaxDraggedCorpses))
{
MessageString(Chat::Red, CORPSEDRAG_LIMIT);
return;
}
VERIFY_PACKET_LENGTH(OP_CorpseDrag, app, CorpseDrag_Struct);
CorpseDrag_Struct *cds = (CorpseDrag_Struct*)app->pBuffer;
Mob* corpse = entity_list.GetMob(cds->CorpseName);
if (!corpse || !corpse->IsPlayerCorpse() || corpse->CastToCorpse()->IsBeingLooted())
return;
Client *c = entity_list.FindCorpseDragger(corpse->GetID());
if (c)
{
if (c == this)
MessageString(Chat::DefaultText, CORPSEDRAG_ALREADY, corpse->GetCleanName());
else
MessageString(Chat::DefaultText, CORPSEDRAG_SOMEONE_ELSE, corpse->GetCleanName());
return;
}
if (!corpse->CastToCorpse()->Summon(this, false, true))
return;
DraggedCorpses.push_back(std::pair<std::string, uint16>(cds->CorpseName, corpse->GetID()));
MessageString(Chat::DefaultText, CORPSEDRAG_BEGIN, cds->CorpseName);
}
void Client::Handle_OP_CorpseDrop(const EQApplicationPacket *app)
{
if (app->size == 1)
{
MessageString(Chat::DefaultText, CORPSEDRAG_STOPALL);
ClearDraggedCorpses();
return;
}
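// Otherwise the payload is a corpse name; stop dragging just that corpse.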
for (auto Iterator = DraggedCorpses.begin(); Iterator != DraggedCorpses.end(); ++Iterator)
{
if (!strcasecmp(Iterator->first.c_str(), (const char *)app->pBuffer))
{
MessageString(Chat::DefaultText, CORPSEDRAG_STOP);
Iterator = DraggedCorpses.erase(Iterator);
return;
}
}
}
void Client::Handle_OP_CrashDump(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_CreateObject(const EQApplicationPacket *app)
{
if (LogSys.log_settings[Logs::Inventory].is_category_enabled)
LogInventory("Handle_OP_CreateObject() [psize: [{}]] [{}]", app->size, DumpPacketToString(app).c_str());
DropItem(EQEmu::invslot::slotCursor);
return;
}
void Client::Handle_OP_CrystalCreate(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_CrystalCreate, app, CrystalReclaim_Struct);
CrystalReclaim_Struct *cr = (CrystalReclaim_Struct*)app->pBuffer;
const uint32 requestQty = cr->amount;
const bool isRadiant = cr->type == 4;
const bool isEbon = cr->type == 5;
// Check: Valid type requested.
if (!isRadiant && !isEbon) {
return;
}
// Check: Valid quantity requested.
if (requestQty < 1) {
return;
}
// Check: Valid client state to make request.
// In this situation the client is either desynced or attempting an exploit.
const uint32 currentQty = isRadiant ? GetRadiantCrystals() : GetEbonCrystals();
if (currentQty == 0) {
return;
}
// Prevent the client from creating more than they have.
const uint32 amount = EQEmu::ClampUpper(requestQty, currentQty);
const uint32 itemID = isRadiant ? RuleI(Zone, RadiantCrystalItemID) : RuleI(Zone, EbonCrystalItemID);
// Summon crystals for player.
const bool success = SummonItem(itemID, amount);
if (!success) {
return;
}
// Deduct crystals from client and update them.
if (isRadiant) {
m_pp.currentRadCrystals -= amount;
m_pp.careerRadCrystals -= amount;
}
else if (isEbon) {
m_pp.currentEbonCrystals -= amount;
m_pp.careerEbonCrystals -= amount;
}
SaveCurrency();
SendCrystalCounts();
}
void Client::Handle_OP_CrystalReclaim(const EQApplicationPacket *app)
{
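// Convert any radiant/ebon crystal items the player is carrying (worn, personal bags, or cursor) back into crystal currency.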
uint32 ebon = NukeItem(RuleI(Zone, EbonCrystalItemID), invWhereWorn | invWherePersonal | invWhereCursor);
uint32 radiant = NukeItem(RuleI(Zone, RadiantCrystalItemID), invWhereWorn | invWherePersonal | invWhereCursor);
if ((ebon + radiant) > 0) {
AddCrystals(radiant, ebon);
}
}
void Client::Handle_OP_Damage(const EQApplicationPacket *app)
{
if (app->size != sizeof(CombatDamage_Struct)) {
LogError("Received invalid sized OP_Damage: got [{}], expected [{}]", app->size, sizeof(CombatDamage_Struct));
DumpPacket(app);
return;
}
// Broadcast to other clients
CombatDamage_Struct* damage = (CombatDamage_Struct*)app->pBuffer;
// don't echo falling-damage packets back to the originator
entity_list.QueueClients(this, app, (damage->type == DamageTypeFalling));
return;
}
void Client::Handle_OP_Death(const EQApplicationPacket *app)
{
if (app->size != sizeof(Death_Struct))
return;
Death_Struct* ds = (Death_Struct*)app->pBuffer;
//I think this attack_skill value is really a value from SkillDamageTypes...
if (ds->attack_skill > EQEmu::skills::HIGHEST_SKILL) {
return;
}
if (GetHP() > 0)
return;
Mob* killer = entity_list.GetMob(ds->killer_id);
Death(killer, ds->damage, ds->spell_id, (EQEmu::skills::SkillType)ds->attack_skill);
return;
}
void Client::Handle_OP_DelegateAbility(const EQApplicationPacket *app)
{
if (app->size != sizeof(DelegateAbility_Struct))
{
LogDebug("Size mismatch in OP_DelegateAbility expected [{}] got [{}]", sizeof(DelegateAbility_Struct), app->size);
DumpPacket(app);
return;
}
DelegateAbility_Struct* das = (DelegateAbility_Struct*)app->pBuffer;
Group *g = GetGroup();
if (!g) return;
switch (das->DelegateAbility)
{
case 0:
{
g->DelegateMainAssist(das->Name);
break;
}
case 1:
{
g->DelegateMarkNPC(das->Name);
break;
}
case 2:
{
g->DelegateMainTank(das->Name);
break;
}
case 3:
{
g->DelegatePuller(das->Name);
break;
}
default:
break;
}
}
void Client::Handle_OP_DeleteItem(const EQApplicationPacket *app)
{
if (app->size != sizeof(DeleteItem_Struct)) {
std::cout << "Wrong size on OP_DeleteItem. Got: " << app->size << ", Expected: " << sizeof(DeleteItem_Struct) << std::endl;
return;
}
DeleteItem_Struct* alc = (DeleteItem_Struct*)app->pBuffer;
const EQEmu::ItemInstance *inst = GetInv().GetItem(alc->from_slot);
if (inst && inst->GetItem()->ItemType == EQEmu::item::ItemTypeAlcohol) {
entity_list.MessageCloseString(this, true, 50, 0, DRINKING_MESSAGE, GetName(), inst->GetItem()->Name);
CheckIncreaseSkill(EQEmu::skills::SkillAlcoholTolerance, nullptr, 25);
int16 AlcoholTolerance = GetSkill(EQEmu::skills::SkillAlcoholTolerance);
int16 IntoxicationIncrease;
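// Higher Alcohol Tolerance reduces how much each drink raises intoxication; intoxication is capped at 200 below.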
if (ClientVersion() < EQEmu::versions::ClientVersion::SoD)
IntoxicationIncrease = (200 - AlcoholTolerance) * 30 / 200 + 10;
else
IntoxicationIncrease = (270 - AlcoholTolerance) * 0.111111108 + 10;
if (IntoxicationIncrease < 0)
IntoxicationIncrease = 1;
m_pp.intoxication += IntoxicationIncrease;
if (m_pp.intoxication > 200)
m_pp.intoxication = 200;
}
DeleteItemInInventory(alc->from_slot, 1);
return;
}
void Client::Handle_OP_DeleteSpawn(const EQApplicationPacket *app)
{
// The client will send this with his id when he zones, maybe when he disconnects too?
//eqs->RemoveData(); // Flushing the queue of packet data to allow for proper zoning
//just make sure this gets out
auto outapp = new EQApplicationPacket(OP_LogoutReply);
FastQueuePacket(&outapp);
outapp = new EQApplicationPacket(OP_DeleteSpawn, sizeof(EntityId_Struct));
EntityId_Struct* eid = (EntityId_Struct*)outapp->pBuffer;
eid->entity_id = GetID();
entity_list.QueueClients(this, outapp, false);
safe_delete(outapp);
hate_list.RemoveEntFromHateList(this->CastToMob());
Disconnect();
return;
}
void Client::Handle_OP_Disarm(const EQApplicationPacket *app) {
if (dead || bZoning) return;
if (!HasSkill(EQEmu::skills::SkillDisarm))
return;
if (app->size != sizeof(Disarm_Struct)) {
LogSkills("Size mismatch for Disarm_Struct packet");
return;
}
Disarm_Struct *disarm = (Disarm_Struct *)app->pBuffer;
if (!p_timers.Expired(&database, pTimerCombatAbility2, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerCombatAbility2, 8);
BreakInvis();
Mob* pmob = entity_list.GetMob(disarm->source);
Mob* tmob = entity_list.GetMob(disarm->target);
if (!pmob || !tmob)
return;
if (pmob->GetID() != GetID()) {
// Client sent a disarm request with an originator ID not matching their own ID.
char *hack_str = NULL;
MakeAnyLenString(&hack_str, "Player %s (%d) sent OP_Disarm with source ID of: %d", GetCleanName(), GetID(), pmob->GetID());
database.SetMQDetectionFlag(this->account_name, this->name, hack_str, zone->GetShortName());
safe_delete_array(hack_str);
return;
}
// No disarm on corpses
if (tmob->IsCorpse())
return;
// No target
if (!GetTarget())
return;
// Targets don't match (possible hack, but not flagging)
if (GetTarget() != tmob) {
return;
}
// Too far away
if (pmob->CalculateDistance(GetTarget()->GetX(), GetTarget()->GetY(), GetTarget()->GetZ()) > 400)
return;
// Can't see mob
//if (tmob->BehindMob(pmob))
// return;
// How can we disarm someone if we are feigned.
if (GetFeigned())
return;
// We can't disarm someone who is feigned.
if (tmob->IsClient() && tmob->CastToClient()->GetFeigned())
return;
if (GetTarget() == tmob && pmob == this->CastToMob() &&
disarm->skill == GetSkill(EQEmu::skills::SkillDisarm) && IsAttackAllowed(tmob)) {
int p_level = pmob->GetLevel() ? pmob->GetLevel() : 1;
int t_level = tmob->GetLevel() ? tmob->GetLevel() : 1;
// We have a disarmable target - success or fail, we always aggro the mob
if (tmob->IsNPC()) {
if (!tmob->CheckAggro(pmob)) {
zone->AddAggroMob();
tmob->AddToHateList(pmob, p_level);
}
else {
tmob->AddToHateList(pmob, p_level / 3);
}
}
int chance = GetSkill(EQEmu::skills::SkillDisarm); // (1% @ 0 skill) (11% @ 200 skill) - against even con
chance /= 2;
chance += 10;
// Modify chance based on level difference
float lvl_mod = static_cast<float>(p_level) / static_cast<float>(t_level); // float division so the level ratio isn't truncated to an integer
chance *= lvl_mod;
if (chance > 300)
chance = 300; // max chance of 30%
if (tmob->IsNPC()) {
tmob->CastToNPC()->Disarm(this, chance);
}
else if (tmob->IsClient()) {
tmob->CastToClient()->Disarm(this, chance);
}
return;
}
// Trying to disarm something we can't disarm
MessageString(Chat::Skills, DISARM_NO_TARGET);
return;
}
void Client::Handle_OP_DeleteSpell(const EQApplicationPacket *app)
{
if (app->size != sizeof(DeleteSpell_Struct))
return;
EQApplicationPacket* outapp = app->Copy();
DeleteSpell_Struct* dss = (DeleteSpell_Struct*)outapp->pBuffer;
if (dss->spell_slot < 0 || dss->spell_slot >= EQEmu::spells::DynamicLookup(ClientVersion(), GetGM())->SpellbookSize)
return;
if (m_pp.spell_book[dss->spell_slot] != SPELLBOOK_UNKNOWN) {
// delete from the database before clearing the slot, so the correct spell ID is passed
database.DeleteCharacterSpell(this->CharacterID(), m_pp.spell_book[dss->spell_slot], dss->spell_slot);
m_pp.spell_book[dss->spell_slot] = SPELLBOOK_UNKNOWN;
dss->success = 1;
}
else
dss->success = 0;
FastQueuePacket(&outapp);
return;
}
void Client::Handle_OP_DisarmTraps(const EQApplicationPacket *app)
{
if (!HasSkill(EQEmu::skills::SkillDisarmTraps))
return;
if (!p_timers.Expired(&database, pTimerDisarmTraps, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
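// Reuse time is the base DisarmTraps timer reduced by any reuse-time reduction from GetSkillReuseTime, with a floor of 1 second.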
int reuse = DisarmTrapsReuseTime - GetSkillReuseTime(EQEmu::skills::SkillDisarmTraps);
if (reuse < 1)
reuse = 1;
p_timers.Start(pTimerDisarmTraps, reuse - 1);
uint8 success = SKILLUP_FAILURE;
float curdist = 0;
Trap* trap = entity_list.FindNearbyTrap(this, 250, curdist, true);
if (trap && trap->detected)
{
float max_radius = (trap->radius * 2) * (trap->radius * 2); // radius is used to trigger trap, so disarm radius should be a bit bigger.
Log(Logs::General, Logs::Traps, "%s is attempting to disarm trap %d. Curdist is %0.2f maxdist is %0.2f", GetName(), trap->trap_id, curdist, max_radius);
if (curdist <= max_radius)
{
int uskill = GetSkill(EQEmu::skills::SkillDisarmTraps);
if ((zone->random.Int(0, 49) + uskill) >= (zone->random.Int(0, 49) + trap->skill))
{
success = SKILLUP_SUCCESS;
MessageString(Chat::Skills, DISARMED_TRAP);
trap->disarmed = true;
Log(Logs::General, Logs::Traps, "Trap %d is disarmed.", trap->trap_id);
trap->UpdateTrap();
}
else
{
MessageString(Chat::Skills, FAIL_DISARM_DETECTED_TRAP);
if (zone->random.Int(0, 99) < 25) {
trap->Trigger(this);
}
}
CheckIncreaseSkill(EQEmu::skills::SkillDisarmTraps, nullptr);
return;
}
else
{
MessageString(Chat::Skills, TRAP_TOO_FAR);
}
}
else
{
MessageString(Chat::Skills, LDON_SENSE_TRAP2);
}
return;
}
void Client::Handle_OP_DoGroupLeadershipAbility(const EQApplicationPacket *app)
{
if (app->size != sizeof(DoGroupLeadershipAbility_Struct)) {
LogDebug("Size mismatch in OP_DoGroupLeadershipAbility expected [{}] got [{}]", sizeof(DoGroupLeadershipAbility_Struct), app->size);
DumpPacket(app);
return;
}
DoGroupLeadershipAbility_Struct* dglas = (DoGroupLeadershipAbility_Struct*)app->pBuffer;
switch (dglas->Ability)
{
case GroupLeadershipAbility_MarkNPC:
{
if (GetTarget())
{
Group* g = GetGroup();
if (g)
g->MarkNPC(GetTarget(), dglas->Parameter);
}
break;
}
case groupAAInspectBuffs:
{
Mob *Target = GetTarget();
if (!Target || !Target->IsClient())
return;
if (IsRaidGrouped()) {
Raid *raid = GetRaid();
if (!raid)
return;
uint32 group_id = raid->GetGroup(this);
if (group_id > 11 || raid->GroupCount(group_id) < 3)
return;
Target->CastToClient()->InspectBuffs(this, raid->GetLeadershipAA(groupAAInspectBuffs, group_id));
return;
}
Group *g = GetGroup();
if (!g || (g->GroupCount() < 3))
return;
Target->CastToClient()->InspectBuffs(this, g->GetLeadershipAA(groupAAInspectBuffs));
break;
}
default:
LogDebug("Got unhandled OP_DoGroupLeadershipAbility Ability: [{}] Parameter: [{}]", dglas->Ability, dglas->Parameter);
break;
}
}
void Client::Handle_OP_DuelResponse(const EQApplicationPacket *app)
{
if (app->size != sizeof(DuelResponse_Struct))
return;
DuelResponse_Struct* ds = (DuelResponse_Struct*)app->pBuffer;
Entity* entity = entity_list.GetID(ds->target_id);
Entity* initiator = entity_list.GetID(ds->entity_id);
if (!entity || !initiator || !entity->IsClient() || !initiator->IsClient())
return;
entity->CastToClient()->SetDuelTarget(0);
entity->CastToClient()->SetDueling(false);
initiator->CastToClient()->SetDuelTarget(0);
initiator->CastToClient()->SetDueling(false);
if (GetID() == initiator->GetID())
entity->CastToClient()->MessageString(Chat::NPCQuestSay, DUEL_DECLINE, initiator->GetName());
else
initiator->CastToClient()->MessageString(Chat::NPCQuestSay, DUEL_DECLINE, entity->GetName());
return;
}
void Client::Handle_OP_DuelResponse2(const EQApplicationPacket *app)
{
if (app->size != sizeof(Duel_Struct))
return;
Duel_Struct* ds = (Duel_Struct*)app->pBuffer;
Entity* entity = entity_list.GetID(ds->duel_target);
Entity* initiator = entity_list.GetID(ds->duel_initiator);
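// Duel accepted: echo the request/response to both parties, flag both as dueling, and interrupt any spell casting in progress.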
if (entity && initiator && entity == this && initiator->IsClient()) {
auto outapp = new EQApplicationPacket(OP_RequestDuel, sizeof(Duel_Struct));
Duel_Struct* ds2 = (Duel_Struct*)outapp->pBuffer;
ds2->duel_initiator = entity->GetID();
ds2->duel_target = entity->GetID();
initiator->CastToClient()->QueuePacket(outapp);
outapp->SetOpcode(OP_DuelResponse2);
ds2->duel_initiator = initiator->GetID();
initiator->CastToClient()->QueuePacket(outapp);
QueuePacket(outapp);
SetDueling(true);
initiator->CastToClient()->SetDueling(true);
SetDuelTarget(ds->duel_initiator);
safe_delete(outapp);
if (IsCasting())
InterruptSpell();
if (initiator->CastToClient()->IsCasting())
initiator->CastToClient()->InterruptSpell();
}
return;
}
void Client::Handle_OP_DumpName(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_Dye(const EQApplicationPacket *app)
{
if (app->size != sizeof(EQEmu::TintProfile))
printf("Wrong size of DyeStruct, Got: %i, Expected: %zu\n", app->size, sizeof(EQEmu::TintProfile));
else {
EQEmu::TintProfile* dye = (EQEmu::TintProfile*)app->pBuffer;
DyeArmor(dye);
}
return;
}
void Client::Handle_OP_Emote(const EQApplicationPacket *app)
{
if (app->size != sizeof(Emote_Struct)) {
LogError("Received invalid sized OP_Emote: got [{}], expected [{}]", app->size, sizeof(Emote_Struct));
DumpPacket(app);
return;
}
// Calculate new packet dimensions
Emote_Struct* in = (Emote_Struct*)app->pBuffer;
in->message[1023] = '\0';
const char* name = GetName();
uint32 len_name = strlen(name);
uint32 len_msg = strlen(in->message);
// crash protection -- cheater
if (len_msg > 512) {
in->message[512] = '\0';
len_msg = 512;
}
uint32 len_packet = sizeof(in->type) + len_name
+ len_msg + 1;
// Construct outgoing packet
auto outapp = new EQApplicationPacket(OP_Emote, len_packet);
Emote_Struct* out = (Emote_Struct*)outapp->pBuffer;
out->type = in->type;
memcpy(out->message, name, len_name);
memcpy(&out->message[len_name], in->message, len_msg);
/*
if (target && target->IsClient()) {
entity_list.QueueCloseClients(this, outapp, false, 100, target);
cptr = outapp->pBuffer + 2;
// not sure if live does this or not. thought it was a nice feature, but would take a lot to
// clean up grammatical and other errors. Maybe with a regex parser...
replacestr((char *)cptr, target->GetName(), "you");
replacestr((char *)cptr, " he", " you");
replacestr((char *)cptr, " she", " you");
replacestr((char *)cptr, " him", " you");
replacestr((char *)cptr, " her", " you");
target->CastToClient()->QueuePacket(outapp);
}
else
*/
entity_list.QueueCloseClients(this, outapp, true, RuleI(Range, Emote), 0, true, FilterSocials);
safe_delete(outapp);
return;
}
void Client::Handle_OP_EndLootRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
std::cout << "Wrong size: OP_EndLootRequest, size=" << app->size << ", expected " << sizeof(uint32) << std::endl;
return;
}
SetLooting(0);
Entity* entity = entity_list.GetID(*((uint16*)app->pBuffer));
if (entity == 0) {
Message(Chat::Red, "Error: OP_EndLootRequest: Corpse not found (ent = 0)");
if (ClientVersion() >= EQEmu::versions::ClientVersion::SoD)
Corpse::SendEndLootErrorPacket(this);
else
Corpse::SendLootReqErrorPacket(this);
return;
}
else if (!entity->IsCorpse()) {
Message(Chat::Red, "Error: OP_EndLootRequest: Corpse not found (!entity->IsCorpse())");
Corpse::SendLootReqErrorPacket(this);
return;
}
else {
entity->CastToCorpse()->EndLoot(this, app);
}
return;
}
void Client::Handle_OP_EnvDamage(const EQApplicationPacket *app)
{
if (!ClientFinishedLoading())
{
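// Client hasn't finished zoning in; nudge HP so the packet is acknowledged and skip the damage.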
SetHP(GetHP() - 1);
return;
}
if (app->size != sizeof(EnvDamage2_Struct)) {
LogError("Received invalid sized OP_EnvDamage: got [{}], expected [{}]", app->size, sizeof(EnvDamage2_Struct));
DumpPacket(app);
return;
}
EnvDamage2_Struct* ed = (EnvDamage2_Struct*)app->pBuffer;
int damage = ed->damage;
if (ed->dmgtype == 252) {
int mod = spellbonuses.ReduceFallDamage + itembonuses.ReduceFallDamage + aabonuses.ReduceFallDamage;
damage -= damage * mod / 100;
}
if (damage < 0)
damage = 31337;
if (admin >= minStatusToAvoidFalling && GetGM()) {
Message(Chat::Red, "Your GM status protects you from %i points of type %i environmental damage.", ed->damage, ed->dmgtype);
SetHP(GetHP() - 1); // needed or else the client won't acknowledge
return;
}
else if (GetInvul()) {
Message(Chat::Red, "Your invuln status protects you from %i points of type %i environmental damage.", ed->damage, ed->dmgtype);
SetHP(GetHP() - 1); // needed or else the client won't acknowledge
return;
}
else if (zone->GetZoneID() == 183 || zone->GetZoneID() == 184) {
// Hard coded tutorial and load zones for no fall damage
return;
}
else {
SetHP(GetHP() - (damage * RuleR(Character, EnvironmentDamageMulipliter)));
/* EVENT_ENVIRONMENTAL_DAMAGE */
int final_damage = (damage * RuleR(Character, EnvironmentDamageMulipliter));
char buf[24];
snprintf(buf, 23, "%u %u %i", ed->damage, ed->dmgtype, final_damage);
parse->EventPlayer(EVENT_ENVIRONMENTAL_DAMAGE, this, buf, 0);
}
if (GetHP() <= 0) {
mod_client_death_env();
Death(0, 32000, SPELL_UNKNOWN, EQEmu::skills::SkillHandtoHand);
}
SendHPUpdate();
return;
}
void Client::Handle_OP_FaceChange(const EQApplicationPacket *app)
{
if (app->size != sizeof(FaceChange_Struct)) {
LogError("Invalid size for OP_FaceChange: Expected: [{}], Got: [{}]",
sizeof(FaceChange_Struct), app->size);
return;
}
// Notify other clients in zone
entity_list.QueueClients(this, app, false);
FaceChange_Struct* fc = (FaceChange_Struct*)app->pBuffer;
m_pp.haircolor = fc->haircolor;
m_pp.beardcolor = fc->beardcolor;
m_pp.eyecolor1 = fc->eyecolor1;
m_pp.eyecolor2 = fc->eyecolor2;
m_pp.hairstyle = fc->hairstyle;
m_pp.face = fc->face;
m_pp.beard = fc->beard;
m_pp.drakkin_heritage = fc->drakkin_heritage;
m_pp.drakkin_tattoo = fc->drakkin_tattoo;
m_pp.drakkin_details = fc->drakkin_details;
Save();
MessageString(Chat::Red, FACE_ACCEPTED);
//Message(Chat::Red, "Facial features updated.");
return;
}
void Client::Handle_OP_FeignDeath(const EQApplicationPacket *app)
{
if (GetClass() != MONK)
return;
if (!p_timers.Expired(&database, pTimerFeignDeath, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
int reuse = FeignDeathReuseTime;
reuse -= GetSkillReuseTime(EQEmu::skills::SkillFeignDeath);
if (reuse < 1)
reuse = 1;
p_timers.Start(pTimerFeignDeath, reuse - 1);
//BreakInvis();
uint16 primfeign = GetSkill(EQEmu::skills::SkillFeignDeath);
uint16 secfeign = GetSkill(EQEmu::skills::SkillFeignDeath);
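// Feign Death skill above 100 only counts at half value: cap the primary portion at 100 and add half of the overflow.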
if (primfeign > 100) {
primfeign = 100;
secfeign = secfeign - 100;
secfeign = secfeign / 2;
}
else
secfeign = 0;
uint16 totalfeign = primfeign + secfeign;
if (zone->random.Real(0, 160) > totalfeign) {
SetFeigned(false);
entity_list.MessageCloseString(this, false, 200, 10, STRING_FEIGNFAILED, GetName());
}
else {
SetFeigned(true);
}
CheckIncreaseSkill(EQEmu::skills::SkillFeignDeath, nullptr, 5);
return;
}
void Client::Handle_OP_FindPersonRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(FindPersonRequest_Struct))
printf("Error in FindPersonRequest_Struct. Expected size of: %zu, but got: %i\n", sizeof(FindPersonRequest_Struct), app->size);
else {
FindPersonRequest_Struct* t = (FindPersonRequest_Struct*)app->pBuffer;
std::vector<FindPerson_Point> points;
Mob* target = entity_list.GetMob(t->npc_id);
if (target == nullptr) {
//empty length packet == not found.
EQApplicationPacket outapp(OP_FindPersonReply, 0);
QueuePacket(&outapp);
return;
}
if (!RuleB(Pathing, Find) && RuleB(Bazaar, EnableWarpToTrader) && target->IsClient() && (target->CastToClient()->Trader ||
target->CastToClient()->Buyer)) {
Message(Chat::Yellow, "Moving you to Trader %s", target->GetName());
MovePC(zone->GetZoneID(), zone->GetInstanceID(), target->GetX(), target->GetY(), target->GetZ(), 0.0f);
}
if (!RuleB(Pathing, Find) || !zone->pathing)
{
//fill in the path array...
//
points.clear();
FindPerson_Point a;
FindPerson_Point b;
a.x = GetX();
a.y = GetY();
a.z = GetZ();
b.x = target->GetX();
b.y = target->GetY();
b.z = target->GetZ();
points.push_back(a);
points.push_back(b);
}
else
{
glm::vec3 Start(GetX(), GetY(), GetZ() + (GetSize() < 6.0 ? 6 : GetSize()) * HEAD_POSITION);
glm::vec3 End(target->GetX(), target->GetY(), target->GetZ() + (target->GetSize() < 6.0 ? 6 : target->GetSize()) * HEAD_POSITION);
bool partial = false;
bool stuck = false;
auto pathlist = zone->pathing->FindRoute(Start, End, partial, stuck);
if (pathlist.empty() || partial)
{
EQApplicationPacket outapp(OP_FindPersonReply, 0);
QueuePacket(&outapp);
return;
}
// Live appears to send the points in this order:
// Final destination.
// Current Position.
// rest of the points.
FindPerson_Point p;
int PointNumber = 0;
bool LeadsToTeleporter = false;
auto v = pathlist.back();
p.x = v.pos.x;
p.y = v.pos.y;
p.z = v.pos.z;
points.push_back(p);
p.x = GetX();
p.y = GetY();
p.z = GetZ();
points.push_back(p);
for (auto Iterator = pathlist.begin(); Iterator != pathlist.end(); ++Iterator)
{
if ((*Iterator).teleport) // Teleporter
{
LeadsToTeleporter = true;
break;
}
glm::vec3 v = (*Iterator).pos;
p.x = v.x;
p.y = v.y;
p.z = v.z;
points.push_back(p);
++PointNumber;
}
if (!LeadsToTeleporter)
{
p.x = target->GetX();
p.y = target->GetY();
p.z = target->GetZ();
points.push_back(p);
}
}
SendPathPacket(points);
}
}
void Client::Handle_OP_Fishing(const EQApplicationPacket *app)
{
if (!p_timers.Expired(&database, pTimerFishing, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
if (CanFish()) {
parse->EventPlayer(EVENT_FISH_START, this, "", 0);
//these will trigger GoFish() after a delay if we're able to actually fish, and if not, we won't stop the client from trying again immediately (although we may need to tell it to repop the button)
p_timers.Start(pTimerFishing, FishingReuseTime - 1);
fishing_timer.Start();
}
return;
// Changes made based on Bobs work on foraging. Now can set items in the forage database table to
// forage for.
}
void Client::Handle_OP_Forage(const EQApplicationPacket *app)
{
if (!p_timers.Expired(&database, pTimerForaging, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerForaging, ForagingReuseTime - 1);
ForageItem();
return;
}
void Client::Handle_OP_FriendsWho(const EQApplicationPacket *app)
{
char *FriendsString = (char*)app->pBuffer;
FriendsWho(FriendsString);
return;
}
void Client::Handle_OP_GetGuildMOTD(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GetGuildMOTD");
SendGuildMOTD(true);
if (IsInAGuild())
{
SendGuildURL();
SendGuildChannel();
}
}
void Client::Handle_OP_GetGuildsList(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GetGuildsList");
SendGuildList();
}
void Client::Handle_OP_GMBecomeNPC(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/becomenpc");
return;
}
if (app->size != sizeof(BecomeNPC_Struct)) {
LogError("Wrong size: OP_GMBecomeNPC, size=[{}], expected [{}]", app->size, sizeof(BecomeNPC_Struct));
return;
}
//entity_list.QueueClients(this, app, false);
BecomeNPC_Struct* bnpc = (BecomeNPC_Struct*)app->pBuffer;
Mob* cli = (Mob*)entity_list.GetMob(bnpc->id);
if (cli == 0 || !cli->IsClient()) // the Client-only calls below would be invalid on anything else
return;
cli->CastToClient()->QueuePacket(app);
cli->SendAppearancePacket(AT_NPCName, 1, true);
cli->CastToClient()->SetBecomeNPC(true);
cli->CastToClient()->SetBecomeNPCLevel(bnpc->maxlevel);
cli->MessageString(Chat::White, TOGGLE_OFF);
cli->CastToClient()->tellsoff = true;
//TODO: Make this toggle a BecomeNPC flag so that it gets updated when people zone in as well; Make combat work with this.
return;
}
void Client::Handle_OP_GMDelCorpse(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMDelCorpse_Struct))
return;
if (this->Admin() < commandEditPlayerCorpses) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/delcorpse");
return;
}
GMDelCorpse_Struct* dc = (GMDelCorpse_Struct *)app->pBuffer;
Mob* corpse = entity_list.GetMob(dc->corpsename);
if (corpse == 0) {
return;
}
if (corpse->IsCorpse() != true) {
return;
}
corpse->CastToCorpse()->Delete();
std::cout << name << " deleted corpse " << dc->corpsename << std::endl;
Message(Chat::Red, "Corpse %s deleted.", dc->corpsename);
return;
}
void Client::Handle_OP_GMEmoteZone(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/emote");
return;
}
if (app->size != sizeof(GMEmoteZone_Struct)) {
LogError("Wrong size: OP_GMEmoteZone, size=[{}], expected [{}]", app->size, sizeof(GMEmoteZone_Struct));
return;
}
GMEmoteZone_Struct* gmez = (GMEmoteZone_Struct*)app->pBuffer;
char* newmessage = nullptr;
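// The emote text may contain multiple segments separated by '^'; broadcast each segment as its own zone-wide message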
if (strstr(gmez->text, "^") == 0)
entity_list.Message(0, 15, gmez->text);
else {
for (newmessage = strtok((char*)gmez->text, "^"); newmessage != nullptr; newmessage = strtok(nullptr, "^"))
entity_list.Message(0, 15, newmessage);
}
return;
}
void Client::Handle_OP_GMEndTraining(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMTrainEnd_Struct)) {
LogDebug("Size mismatch in OP_GMEndTraining expected [{}] got [{}]", sizeof(GMTrainEnd_Struct), app->size);
DumpPacket(app);
return;
}
OPGMEndTraining(app);
return;
}
void Client::Handle_OP_GMFind(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/find");
return;
}
if (app->size != sizeof(GMSummon_Struct)) {
LogError("Wrong size: OP_GMFind, size=[{}], expected [{}]", app->size, sizeof(GMSummon_Struct));
return;
}
//Break down incoming
GMSummon_Struct* request = (GMSummon_Struct*)app->pBuffer;
//Create a new outgoing
auto outapp = new EQApplicationPacket(OP_GMFind, sizeof(GMSummon_Struct));
GMSummon_Struct* foundplayer = (GMSummon_Struct*)outapp->pBuffer;
//Copy the constants
strcpy(foundplayer->charname, request->charname);
strcpy(foundplayer->gmname, request->gmname);
//Check if the target exists in this zone...
Mob* gt = entity_list.GetMob(request->charname);
if (gt != 0) {
foundplayer->success = 1;
foundplayer->x = (int32)gt->GetX();
foundplayer->y = (int32)gt->GetY();
foundplayer->z = (int32)gt->GetZ();
foundplayer->zoneID = zone->GetZoneID();
}
//Send the packet...
FastQueuePacket(&outapp);
return;
}
void Client::Handle_OP_GMGoto(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMSummon_Struct)) {
std::cout << "Wrong size on OP_GMGoto. Got: " << app->size << ", Expected: " << sizeof(GMSummon_Struct) << std::endl;
return;
}
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/goto");
return;
}
GMSummon_Struct* gmg = (GMSummon_Struct*)app->pBuffer;
Mob* gt = entity_list.GetMob(gmg->charname);
if (gt != nullptr) {
this->MovePC(zone->GetZoneID(), zone->GetInstanceID(), gt->GetX(), gt->GetY(), gt->GetZ(), gt->GetHeading());
}
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected.");
else {
auto pack = new ServerPacket(ServerOP_GMGoto, sizeof(ServerGMGoto_Struct));
memset(pack->pBuffer, 0, pack->size);
ServerGMGoto_Struct* wsgmg = (ServerGMGoto_Struct*)pack->pBuffer;
strcpy(wsgmg->myname, this->GetName());
strcpy(wsgmg->gotoname, gmg->charname);
wsgmg->admin = admin;
worldserver.SendPacket(pack);
safe_delete(pack);
}
return;
}
void Client::Handle_OP_GMHideMe(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/hideme");
return;
}
if (app->size != sizeof(SpawnAppearance_Struct)) {
LogError("Wrong size: OP_GMHideMe, size=[{}], expected [{}]", app->size, sizeof(SpawnAppearance_Struct));
return;
}
SpawnAppearance_Struct* sa = (SpawnAppearance_Struct*)app->pBuffer;
Message(Chat::Red, "#: %i, %i", sa->type, sa->parameter);
SetHideMe(!sa->parameter);
return;
}
void Client::Handle_OP_GMKick(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMKick_Struct))
return;
if (this->Admin() < minStatusToKick) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/kick");
return;
}
GMKick_Struct* gmk = (GMKick_Struct *)app->pBuffer;
Client* client = entity_list.GetClientByName(gmk->name);
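// If the target is not in this zone, relay the kick through the world server; otherwise broadcast the kick packet locally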
if (client == 0) {
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
auto pack = new ServerPacket(ServerOP_KickPlayer, sizeof(ServerKickPlayer_Struct));
ServerKickPlayer_Struct* skp = (ServerKickPlayer_Struct*)pack->pBuffer;
strcpy(skp->adminname, gmk->gmname);
strcpy(skp->name, gmk->name);
skp->adminrank = this->Admin();
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
else {
entity_list.QueueClients(this, app);
//client->Kick();
}
return;
}
void Client::Handle_OP_GMKill(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/kill");
return;
}
if (app->size != sizeof(GMKill_Struct)) {
LogError("Wrong size: OP_GMKill, size=[{}], expected [{}]", app->size, sizeof(GMKill_Struct));
return;
}
GMKill_Struct* gmk = (GMKill_Struct *)app->pBuffer;
Mob* obj = entity_list.GetMob(gmk->name);
Client* client = entity_list.GetClientByName(gmk->name);
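// Handle the kill locally when the target is in this zone; otherwise relay the request through the world server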
if (obj != 0) {
if (client != 0) {
entity_list.QueueClients(this, app);
}
else {
obj->Kill();
}
}
else {
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
auto pack = new ServerPacket(ServerOP_KillPlayer, sizeof(ServerKillPlayer_Struct));
ServerKillPlayer_Struct* skp = (ServerKillPlayer_Struct*)pack->pBuffer;
strcpy(skp->gmname, gmk->gmname);
strcpy(skp->target, gmk->name);
skp->admin = this->Admin();
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
return;
}
void Client::Handle_OP_GMLastName(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMLastName_Struct)) {
std::cout << "Wrong size on OP_GMLastName. Got: " << app->size << ", Expected: " << sizeof(GMLastName_Struct) << std::endl;
return;
}
GMLastName_Struct* gmln = (GMLastName_Struct*)app->pBuffer;
if (strlen(gmln->lastname) >= 64) {
Message(Chat::Red, "/LastName: New last name too long. (max=63)");
}
else {
Client* client = entity_list.GetClientByName(gmln->name);
if (client == 0) {
Message(Chat::Red, "/LastName: %s not found", gmln->name);
}
else {
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/lastname"); // flag the sender, consistent with the other GM command checks
return;
}
else
client->ChangeLastName(gmln->lastname);
}
gmln->unknown[0] = 1;
gmln->unknown[1] = 1;
gmln->unknown[2] = 1;
gmln->unknown[3] = 1;
entity_list.QueueClients(this, app, false);
}
return;
}
void Client::Handle_OP_GMNameChange(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMName_Struct)) {
LogError("Wrong size: OP_GMNameChange, size=[{}], expected [{}]", app->size, sizeof(GMName_Struct));
return;
}
const GMName_Struct* gmn = (const GMName_Struct *)app->pBuffer;
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/name");
return;
}
Client* client = entity_list.GetClientByName(gmn->oldname);
LogInfo("GM([{}]) changeing players name. Old:[{}] New:[{}]", GetName(), gmn->oldname, gmn->newname);
bool usedname = database.CheckUsedName((const char*)gmn->newname);
if (client == 0) {
Message(Chat::Red, "%s not found for name change. Operation failed!", gmn->oldname);
return;
}
if ((strlen(gmn->newname) > 63) || (strlen(gmn->newname) == 0)) {
Message(Chat::Red, "Invalid number of characters in new name (%s).", gmn->newname);
return;
}
if (!usedname) {
Message(Chat::Red, "%s is already in use. Operation failed!", gmn->newname);
return;
}
database.UpdateName(gmn->oldname, gmn->newname);
strcpy(client->name, gmn->newname);
client->Save();
if (gmn->badname == 1) {
database.AddToNameFilter(gmn->oldname);
}
EQApplicationPacket* outapp = app->Copy();
GMName_Struct* gmn2 = (GMName_Struct*)outapp->pBuffer;
gmn2->unknown[0] = 1;
gmn2->unknown[1] = 1;
gmn2->unknown[2] = 1;
entity_list.QueueClients(this, outapp, false);
safe_delete(outapp);
UpdateWho();
return;
}
void Client::Handle_OP_GMSearchCorpse(const EQApplicationPacket *app)
{
// Could make this into a rule, although there is a hard limit since we are using a popup, of 4096 bytes that can
// be displayed in the window, including all the HTML formatting tags.
//
const int maxResults = 10;
if (app->size < sizeof(GMSearchCorpse_Struct))
{
LogDebug("OP_GMSearchCorpse size lower than expected: got [{}] expected at least [{}]", app->size, sizeof(GMSearchCorpse_Struct));
DumpPacket(app);
return;
}
GMSearchCorpse_Struct *gmscs = (GMSearchCorpse_Struct *)app->pBuffer;
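// Null terminate and escape the search string before building the corpse lookup query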
gmscs->Name[63] = '\0';
auto escSearchString = new char[129];
database.DoEscapeString(escSearchString, gmscs->Name, strlen(gmscs->Name));
std::string query = StringFormat("SELECT charname, zone_id, x, y, z, time_of_death, is_rezzed, is_buried "
"FROM character_corpses WheRE charname LIKE '%%%s%%' ORDER BY charname LIMIT %i",
escSearchString, maxResults);
safe_delete_array(escSearchString);
auto results = database.QueryDatabase(query);
if (!results.Success()) {
return;
}
if (results.RowCount() == 0)
return;
if (results.RowCount() == maxResults)
Message(Chat::Red, "Your search found too many results; some are not displayed.");
else
Message(Chat::Yellow, "There are %i corpse(s) that match the search string '%s'.", results.RowCount(), gmscs->Name);
char charName[64], time_of_death[20];
std::string popupText = "<table><tr><td>Name</td><td>Zone</td><td>X</td><td>Y</td><td>Z</td><td>Date</td><td>"
"Rezzed</td><td>Buried</td></tr><tr><td> </td><td></td><td></td><td></td><td></td><td>"
"</td><td></td><td></td></tr>";
for (auto row = results.begin(); row != results.end(); ++row) {
strn0cpy(charName, row[0], sizeof(charName));
uint32 ZoneID = atoi(row[1]);
float CorpseX = atof(row[2]);
float CorpseY = atof(row[3]);
float CorpseZ = atof(row[4]);
strn0cpy(time_of_death, row[5], sizeof(time_of_death));
bool corpseRezzed = atoi(row[6]);
bool corpseBuried = atoi(row[7]);
popupText += StringFormat("<tr><td>%s</td><td>%s</td><td>%8.0f</td><td>%8.0f</td><td>%8.0f</td><td>%s</td><td>%s</td><td>%s</td></tr>",
charName, StaticGetZoneName(ZoneID), CorpseX, CorpseY, CorpseZ, time_of_death,
corpseRezzed ? "Yes" : "No", corpseBuried ? "Yes" : "No");
if (popupText.size() > 4000) {
Message(Chat::Red, "Unable to display all the results.");
break;
}
}
popupText += "</table>";
SendPopupToClient("Corpses", popupText.c_str());
}
void Client::Handle_OP_GMServers(const EQApplicationPacket *app)
{
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
auto pack = new ServerPacket(ServerOP_ZoneStatus, strlen(this->GetName()) + 2);
memset(pack->pBuffer, (uint8)admin, 1);
strcpy((char *)&pack->pBuffer[1], this->GetName());
worldserver.SendPacket(pack);
safe_delete(pack);
}
return;
}
void Client::Handle_OP_GMSummon(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMSummon_Struct)) {
std::cout << "Wrong size on OP_GMSummon. Got: " << app->size << ", Expected: " << sizeof(GMSummon_Struct) << std::endl;
return;
}
OPGMSummon(app);
return;
}
void Client::Handle_OP_GMToggle(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMToggle_Struct)) {
std::cout << "Wrong size on OP_GMToggle. Got: " << app->size << ", Expected: " << sizeof(GMToggle_Struct) << std::endl;
return;
}
if (this->Admin() < minStatusToUseGMCommands) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/toggle");
return;
}
GMToggle_Struct *ts = (GMToggle_Struct *)app->pBuffer;
if (ts->toggle == 0) {
this->MessageString(Chat::White, TOGGLE_OFF);
//Message(0, "Turning tells OFF");
tellsoff = true;
}
else if (ts->toggle == 1) {
//Message(0, "Turning tells ON");
this->MessageString(Chat::White, TOGGLE_ON);
tellsoff = false;
}
else {
Message(0, "Unkown value in /toggle packet");
}
UpdateWho();
return;
}
void Client::Handle_OP_GMTraining(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMTrainee_Struct)) {
LogDebug("Size mismatch in OP_GMTraining expected [{}] got [{}]", sizeof(GMTrainee_Struct), app->size);
DumpPacket(app);
return;
}
OPGMTraining(app);
return;
}
void Client::Handle_OP_GMTrainSkill(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMSkillChange_Struct)) {
LogDebug("Size mismatch in OP_GMTrainSkill expected [{}] got [{}]", sizeof(GMSkillChange_Struct), app->size);
DumpPacket(app);
return;
}
OPGMTrainSkill(app);
return;
}
void Client::Handle_OP_GMZoneRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(GMZoneRequest_Struct)) {
std::cout << "Wrong size on OP_GMZoneRequest. Got: " << app->size << ", Expected: " << sizeof(GMZoneRequest_Struct) << std::endl;
return;
}
if (this->Admin() < minStatusToBeGM) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/zone");
return;
}
GMZoneRequest_Struct* gmzr = (GMZoneRequest_Struct*)app->pBuffer;
float tarx = -1, tary = -1, tarz = -1;
int16 minstatus = 0;
uint8 minlevel = 0;
char tarzone[32];
uint16 zid = gmzr->zone_id;
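// A zone_id of 0 falls back to the stored zonesummon_id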
if (gmzr->zone_id == 0)
zid = zonesummon_id;
const char * zname = database.GetZoneName(zid);
if (zname == nullptr)
tarzone[0] = 0;
else
strcpy(tarzone, zname);
// this both loads the safe points and does a sanity check on zone name
if (!database.GetSafePoints(tarzone, 0, &tarx, &tary, &tarz, &minstatus, &minlevel)) {
tarzone[0] = 0;
}
auto outapp = new EQApplicationPacket(OP_GMZoneRequest, sizeof(GMZoneRequest_Struct));
GMZoneRequest_Struct* gmzr2 = (GMZoneRequest_Struct*)outapp->pBuffer;
strcpy(gmzr2->charname, this->GetName());
gmzr2->zone_id = gmzr->zone_id;
gmzr2->x = tarx;
gmzr2->y = tary;
gmzr2->z = tarz;
// Next line stolen from ZoneChange as well... - This gives us a nicer message than the normal "zone is down" message...
if (tarzone[0] != 0 && admin >= minstatus && GetLevel() >= minlevel)
gmzr2->success = 1;
else {
std::cout << "GetZoneSafeCoords failed. zoneid = " << gmzr->zone_id << "; czone = " << zone->GetZoneID() << std::endl;
gmzr2->success = 0;
}
QueuePacket(outapp);
safe_delete(outapp);
return;
}
void Client::Handle_OP_GMZoneRequest2(const EQApplicationPacket *app)
{
if (this->Admin() < minStatusToBeGM) {
Message(Chat::Red, "Your account has been reported for hacking.");
database.SetHackerFlag(this->account_name, this->name, "/zone");
return;
}
if (app->size < sizeof(uint32)) {
LogError("OP size error: OP_GMZoneRequest2 expected:[{}] got:[{}]", sizeof(uint32), app->size);
return;
}
uint32 zonereq = *((uint32 *)app->pBuffer);
GoToSafeCoords(zonereq, 0);
return;
}
void Client::Handle_OP_GroupAcknowledge(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_GroupCancelInvite(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupCancel_Struct)) {
LogError("Invalid size for OP_GroupCancelInvite: Expected: [{}], Got: [{}]",
sizeof(GroupCancel_Struct), app->size);
return;
}
GroupCancel_Struct* gf = (GroupCancel_Struct*)app->pBuffer;
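// Echo the cancel to the inviter if they are in this zone; otherwise relay it through the world server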
Mob* inviter = entity_list.GetClientByName(gf->name1);
if (inviter != nullptr)
{
if (inviter->IsClient())
inviter->CastToClient()->QueuePacket(app);
}
else
{
auto pack = new ServerPacket(ServerOP_GroupCancelInvite, sizeof(GroupCancel_Struct));
memcpy(pack->pBuffer, gf, sizeof(GroupCancel_Struct));
worldserver.SendPacket(pack);
safe_delete(pack);
}
if (!GetMerc())
{
database.SetGroupID(GetName(), 0, CharacterID(), false);
}
return;
}
void Client::Handle_OP_GroupDelete(const EQApplicationPacket *app)
{
//should check for leader, only they should be able to do this..
Group* group = GetGroup();
if (group)
group->DisbandGroup();
if (LFP)
UpdateLFP();
return;
}
void Client::Handle_OP_GroupDisband(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupGeneric_Struct)) {
LogError("Invalid size for GroupGeneric_Struct: Expected: [{}], Got: [{}]",
sizeof(GroupGeneric_Struct), app->size);
return;
}
LogDebug("Member Disband Request from [{}]\n", GetName());
GroupGeneric_Struct* gd = (GroupGeneric_Struct*)app->pBuffer;
Raid *raid = entity_list.GetRaidByClient(this);
if (raid)
{
Mob* memberToDisband = nullptr;
if (!raid->IsGroupLeader(GetName()))
memberToDisband = this;
else
memberToDisband = GetTarget();
if (!memberToDisband)
memberToDisband = entity_list.GetMob(gd->name2);
if (!memberToDisband)
memberToDisband = this;
if (!memberToDisband->IsClient())
return;
//we have a raid.. see if we're in a raid group
uint32 grp = raid->GetGroup(memberToDisband->GetName());
bool wasGrpLdr = raid->members[raid->GetPlayerIndex(memberToDisband->GetName())].IsGroupLeader;
if (grp < 12) {
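// If the departing member led their raid group, hand group leadership to another member of that group before removing them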
if (wasGrpLdr) {
raid->SetGroupLeader(memberToDisband->GetName(), false);
for (int x = 0; x < MAX_RAID_MEMBERS; x++)
{
if (raid->members[x].GroupNumber == grp)
{
if (strlen(raid->members[x].membername) > 0 && strcmp(raid->members[x].membername, memberToDisband->GetName()) != 0)
{
raid->SetGroupLeader(raid->members[x].membername);
break;
}
}
}
}
raid->MoveMember(memberToDisband->GetName(), 0xFFFFFFFF);
raid->GroupUpdate(grp); //break
//raid->SendRaidGroupRemove(memberToDisband->GetName(), grp);
//raid->SendGroupUpdate(memberToDisband->CastToClient());
raid->SendGroupDisband(memberToDisband->CastToClient());
}
//we're done
return;
}
Group* group = GetGroup();
if (!group)
return;
#ifdef BOTS
// this block is necessary to allow more control over controlling how bots are zoned or camped.
if (Bot::GroupHasBot(group)) {
if (group->IsLeader(this)) {
if ((GetTarget() == 0 || GetTarget() == this) || (group->GroupCount() < 3)) {
Bot::ProcessBotGroupDisband(this, std::string());
}
else {
Mob* tempMember = entity_list.GetMob(gd->name1); //Name1 is the target you are disbanding
if (tempMember && tempMember->IsBot()) {
tempMember->CastToBot()->RemoveBotFromGroup(tempMember->CastToBot(), group);
if (LFP)
{
// If we are looking for players, update to show we are on our own now.
UpdateLFP();
}
return; //No need to continue from here we were removing a bot from party
}
}
}
}
group = GetGroup();
if (!group) // We must recheck this here, in case the final bot disbanded the party; otherwise we crash
return;
#endif
Mob* memberToDisband = GetTarget();
if (!memberToDisband)
memberToDisband = entity_list.GetMob(gd->name2);
if (memberToDisband) {
auto group2 = memberToDisband->GetGroup();
if (group2 != group) // they're not in our group!
memberToDisband = this;
}
if (group->GroupCount() < 3)
{
group->DisbandGroup();
if (GetMerc())
GetMerc()->Suspend();
}
else if (group->IsLeader(this) && GetTarget() == nullptr)
{
if (group->GroupCount() > 2 && GetMerc() && !GetMerc()->IsSuspended())
{
group->DisbandGroup();
GetMerc()->MercJoinClientGroup();
}
else
{
group->DisbandGroup();
if (GetMerc())
GetMerc()->Suspend();
}
}
else if (group->IsLeader(this) && (GetTarget() == this || memberToDisband == this))
{
LeaveGroup();
if (GetMerc() && !GetMerc()->IsSuspended())
{
GetMerc()->MercJoinClientGroup();
}
}
else
{
if (memberToDisband)
{
if (group->IsLeader(this))
{
// the group leader can kick other members out of the group...
if (memberToDisband->IsClient())
{
group->DelMember(memberToDisband, false);
Client* memberClient = memberToDisband->CastToClient();
Merc* memberMerc = memberToDisband->CastToClient()->GetMerc();
if (memberClient && memberMerc)
{
memberMerc->MercJoinClientGroup();
}
}
else if (memberToDisband->IsMerc())
{
memberToDisband->CastToMerc()->Suspend();
}
}
else
{
// ...but other members can only remove themselves
group->DelMember(this, false);
if (GetMerc() && !GetMerc()->IsSuspended())
{
GetMerc()->MercJoinClientGroup();
}
}
}
else
{
LogError("Failed to remove player from group. Unable to find player named [{}] in player group", gd->name2);
}
}
if (LFP)
{
// If we are looking for players, update to show we are on our own now.
UpdateLFP();
}
return;
}
void Client::Handle_OP_GroupFollow(const EQApplicationPacket *app)
{
Handle_OP_GroupFollow2(app);
}
void Client::Handle_OP_GroupFollow2(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupGeneric_Struct)) {
LogError("Invalid size for OP_GroupFollow: Expected: [{}], Got: [{}]",
sizeof(GroupGeneric_Struct), app->size);
return;
}
if (LFP) {
// If we were looking for players to start our own group, but we accept an invitation to another
// group, turn LFP off.
database.SetLFP(CharacterID(), false);
worldserver.StopLFP(CharacterID());
}
GroupGeneric_Struct* gf = (GroupGeneric_Struct*)app->pBuffer;
Mob* inviter = entity_list.GetClientByName(gf->name1);
// Inviter and Invitee are in the same zone
if (inviter != nullptr && inviter->IsClient())
{
if (GroupFollow(inviter->CastToClient()))
{
strn0cpy(gf->name1, inviter->GetName(), sizeof(gf->name1));
strn0cpy(gf->name2, GetName(), sizeof(gf->name2));
inviter->CastToClient()->QueuePacket(app);//notify inviter the client accepted
}
}
else if (inviter == nullptr)
{
// Inviter is in another zone - Remove merc from group now if any
LeaveGroup();
auto pack = new ServerPacket(ServerOP_GroupFollow, sizeof(ServerGroupFollow_Struct));
ServerGroupFollow_Struct *sgfs = (ServerGroupFollow_Struct *)pack->pBuffer;
sgfs->CharacterID = CharacterID();
strn0cpy(sgfs->gf.name1, gf->name1, sizeof(sgfs->gf.name1));
strn0cpy(sgfs->gf.name2, gf->name2, sizeof(sgfs->gf.name2));
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
void Client::Handle_OP_GroupInvite(const EQApplicationPacket *app)
{
//this seems to be the initial invite to form a group
Handle_OP_GroupInvite2(app);
}
void Client::Handle_OP_GroupInvite2(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupInvite_Struct)) {
LogError("Invalid size for OP_GroupInvite: Expected: [{}], Got: [{}]",
sizeof(GroupInvite_Struct), app->size);
return;
}
GroupInvite_Struct* gis = (GroupInvite_Struct*)app->pBuffer;
Mob *Invitee = entity_list.GetMob(gis->invitee_name);
if (Invitee == this)
{
MessageString(Chat::LightGray, GROUP_INVITEE_SELF);
return;
}
if (Invitee)
{
if (Invitee->IsClient())
{
if (Invitee->CastToClient()->MercOnlyOrNoGroup() && !Invitee->IsRaidGrouped())
{
if (app->GetOpcode() == OP_GroupInvite2)
{
//Make a new packet using all the same information but make sure it's a fixed GroupInvite opcode so we
//Don't have to deal with GroupFollow2 crap.
auto outapp =
new EQApplicationPacket(OP_GroupInvite, sizeof(GroupInvite_Struct));
memcpy(outapp->pBuffer, app->pBuffer, outapp->size);
Invitee->CastToClient()->QueuePacket(outapp);
safe_delete(outapp);
return;
}
else
{
//The correct opcode, no reason to bother wasting time reconstructing the packet
Invitee->CastToClient()->QueuePacket(app);
}
}
}
#ifdef BOTS
else if (Invitee->IsBot()) {
Bot::ProcessBotGroupInvite(this, std::string(Invitee->GetName()));
}
#endif
}
else
{
auto pack = new ServerPacket(ServerOP_GroupInvite, sizeof(GroupInvite_Struct));
memcpy(pack->pBuffer, gis, sizeof(GroupInvite_Struct));
worldserver.SendPacket(pack);
safe_delete(pack);
}
return;
}
void Client::Handle_OP_GroupMakeLeader(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_GroupMakeLeader, app, GroupMakeLeader_Struct);
GroupMakeLeader_Struct *gmls = (GroupMakeLeader_Struct *)app->pBuffer;
Mob* NewLeader = entity_list.GetClientByName(gmls->NewLeader);
Group* g = GetGroup();
if (NewLeader && g)
{
if (g->IsLeader(this))
g->ChangeLeader(NewLeader);
else {
LogDebug("Group /makeleader request originated from non-leader member: [{}]", GetName());
DumpPacket(app);
}
}
}
void Client::Handle_OP_GroupMentor(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupMentor_Struct)) {
LogError("Wrong size: OP_GroupMentor, size=[{}], expected [{}]", app->size, sizeof(GroupMentor_Struct));
DumpPacket(app);
return;
}
GroupMentor_Struct *gms = (GroupMentor_Struct *)app->pBuffer;
gms->name[63] = '\0';
if (IsRaidGrouped()) {
Raid *raid = GetRaid();
if (!raid)
return;
uint32 group_id = raid->GetGroup(this);
if (group_id > 11)
return;
if (strlen(gms->name))
raid->SetGroupMentor(group_id, gms->percent, gms->name);
else
raid->ClearGroupMentor(group_id);
return;
}
Group *group = GetGroup();
if (!group)
return;
if (strlen(gms->name))
group->SetGroupMentor(gms->percent, gms->name);
else
group->ClearGroupMentor();
return;
}
void Client::Handle_OP_GroupRoles(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupRole_Struct)) {
LogError("Wrong size: OP_GroupRoles, size=[{}], expected [{}]", app->size, sizeof(GroupRole_Struct));
DumpPacket(app);
return;
}
GroupRole_Struct *grs = (GroupRole_Struct*)app->pBuffer;
Group *g = GetGroup();
if (!g)
return;
switch (grs->RoleNumber)
{
case 1: //Main Tank
{
if (grs->Toggle)
g->DelegateMainTank(grs->Name1, grs->Toggle);
else
g->UnDelegateMainTank(grs->Name1, grs->Toggle);
break;
}
case 2: //Main Assist
{
if (grs->Toggle)
g->DelegateMainAssist(grs->Name1, grs->Toggle);
else
g->UnDelegateMainAssist(grs->Name1, grs->Toggle);
break;
}
case 3: //Puller
{
if (grs->Toggle)
g->DelegatePuller(grs->Name1, grs->Toggle);
else
g->UnDelegatePuller(grs->Name1, grs->Toggle);
break;
}
default:
break;
}
}
void Client::Handle_OP_GroupUpdate(const EQApplicationPacket *app)
{
if (app->size != sizeof(GroupUpdate_Struct))
{
LogDebug("Size mismatch on OP_GroupUpdate: got [{}] expected [{}]", app->size, sizeof(GroupUpdate_Struct));
DumpPacket(app);
return;
}
GroupUpdate_Struct* gu = (GroupUpdate_Struct*)app->pBuffer;
switch (gu->action) {
case groupActMakeLeader:
{
Mob* newleader = entity_list.GetClientByName(gu->membername[0]);
Group* group = this->GetGroup();
if (newleader && group) {
// the client only sends this if it's the group leader, but check anyway
if (group->IsLeader(this))
group->ChangeLeader(newleader);
else {
LogDebug("Group /makeleader request originated from non-leader member: [{}]", GetName());
DumpPacket(app);
}
}
break;
}
default:
{
LogDebug("Received unhandled OP_GroupUpdate requesting action [{}]", gu->action);
DumpPacket(app);
return;
}
}
}
void Client::Handle_OP_GuildBank(const EQApplicationPacket *app)
{
if (!GuildBanks)
return;
if ((int)zone->GetZoneID() != RuleI(World, GuildBankZoneID))
{
Message(Chat::Red, "The Guild Bank is not available in this zone.");
return;
}
if (app->size < sizeof(uint32)) {
LogError("Wrong size: OP_GuildBank, size=[{}], expected [{}]", app->size, sizeof(uint32));
DumpPacket(app);
return;
}
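// The first uint32 of the payload selects the guild bank action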
char *Buffer = (char *)app->pBuffer;
uint32 Action = VARSTRUCT_DECODE_TYPE(uint32, Buffer);
uint32 sentAction = Action;
if (!IsInAGuild())
{
Message(Chat::Red, "You must be in a Guild to use the Guild Bank.");
if (Action == GuildBankDeposit)
GuildBankDepositAck(true, sentAction);
else
GuildBankAck();
return;
}
if (!IsGuildBanker())
{
if ((Action != GuildBankDeposit) && (Action != GuildBankViewItem) && (Action != GuildBankWithdraw))
{
LogError("Suspected hacking attempt on guild bank from [{}]", GetName());
GuildBankAck();
return;
}
}
switch (Action)
{
case GuildBankPromote:
{
if (GuildBanks->IsAreaFull(GuildID(), GuildBankMainArea))
{
MessageString(Chat::Red, GUILD_BANK_FULL);
GuildBankDepositAck(true, sentAction);
return;
}
GuildBankPromote_Struct *gbps = (GuildBankPromote_Struct*)app->pBuffer;
int Slot = GuildBanks->Promote(GuildID(), gbps->Slot);
if (Slot >= 0)
{
EQEmu::ItemInstance* inst = GuildBanks->GetItem(GuildID(), GuildBankMainArea, Slot, 1);
if (inst)
{
MessageString(Chat::LightGray, GUILD_BANK_TRANSFERRED, inst->GetItem()->Name);
safe_delete(inst);
}
}
else
Message(Chat::Red, "Unexpected error while moving item into Guild Bank.");
GuildBankAck();
break;
}
case GuildBankViewItem:
{
GuildBankViewItem_Struct *gbvis = (GuildBankViewItem_Struct*)app->pBuffer;
EQEmu::ItemInstance* inst = GuildBanks->GetItem(GuildID(), gbvis->Area, gbvis->SlotID, 1);
if (!inst)
break;
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
break;
}
case GuildBankDeposit: // Deposit Item
{
if (GuildBanks->IsAreaFull(GuildID(), GuildBankDepositArea))
{
MessageString(Chat::Red, GUILD_BANK_FULL);
GuildBankDepositAck(true, sentAction);
return;
}
EQEmu::ItemInstance *CursorItemInst = GetInv().GetItem(EQEmu::invslot::slotCursor);
bool Allowed = true;
if (!CursorItemInst)
{
Message(Chat::Red, "No Item on the cursor.");
GuildBankDepositAck(true, sentAction);
return;
}
const EQEmu::ItemData* CursorItem = CursorItemInst->GetItem();
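// Reject NO DROP/attuned items, non-empty containers, augmented items, NO RENT items, and duplicate lore items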
if (!CursorItem->NoDrop || CursorItemInst->IsAttuned())
{
Allowed = false;
}
else if (CursorItemInst->IsNoneEmptyContainer())
{
Allowed = false;
}
else if (CursorItemInst->IsAugmented())
{
Allowed = false;
}
else if (CursorItem->NoRent == 0)
{
Allowed = false;
}
else if (CursorItem->LoreFlag && GuildBanks->HasItem(GuildID(), CursorItem->ID))
{
Allowed = false;
}
if (!Allowed)
{
MessageString(Chat::Red, GUILD_BANK_CANNOT_DEPOSIT);
GuildBankDepositAck(true, sentAction);
return;
}
if (GuildBanks->AddItem(GuildID(), GuildBankDepositArea, CursorItem->ID, CursorItemInst->GetCharges(), GetName(), GuildBankBankerOnly, ""))
{
GuildBankDepositAck(false, sentAction);
DeleteItemInInventory(EQEmu::invslot::slotCursor, 0, false);
}
break;
}
case GuildBankPermissions:
{
GuildBankPermissions_Struct *gbps = (GuildBankPermissions_Struct*)app->pBuffer;
if (gbps->Permissions == 1)
GuildBanks->SetPermissions(GuildID(), gbps->SlotID, gbps->Permissions, gbps->MemberName);
else
GuildBanks->SetPermissions(GuildID(), gbps->SlotID, gbps->Permissions, "");
GuildBankAck();
break;
}
case GuildBankWithdraw:
{
if (GetInv()[EQEmu::invslot::slotCursor])
{
MessageString(Chat::Red, GUILD_BANK_EMPTY_HANDS);
GuildBankAck();
break;
}
GuildBankWithdrawItem_Struct *gbwis = (GuildBankWithdrawItem_Struct*)app->pBuffer;
EQEmu::ItemInstance* inst = GuildBanks->GetItem(GuildID(), gbwis->Area, gbwis->SlotID, gbwis->Quantity);
if (!inst)
{
GuildBankAck();
break;
}
if (!IsGuildBanker() && !GuildBanks->AllowedToWithdraw(GuildID(), gbwis->Area, gbwis->SlotID, GetName()))
{
LogError("Suspected attempted hack on the guild bank from [{}]", GetName());
GuildBankAck();
safe_delete(inst);
break;
}
if (CheckLoreConflict(inst->GetItem()))
{
MessageString(Chat::Red, DUP_LORE);
GuildBankAck();
safe_delete(inst);
break;
}
if (gbwis->Quantity > 0)
{
PushItemOnCursor(*inst);
SendItemPacket(EQEmu::invslot::slotCursor, inst, ItemPacketLimbo);
GuildBanks->DeleteItem(GuildID(), gbwis->Area, gbwis->SlotID, gbwis->Quantity);
}
else
{
Message(0, "Unable to withdraw 0 quantity of %s", inst->GetItem()->Name);
}
safe_delete(inst);
GuildBankAck();
break;
}
case GuildBankSplitStacks:
{
if (GuildBanks->IsAreaFull(GuildID(), GuildBankMainArea))
MessageString(Chat::Red, GUILD_BANK_FULL);
else
{
GuildBankWithdrawItem_Struct *gbwis = (GuildBankWithdrawItem_Struct*)app->pBuffer;
GuildBanks->SplitStack(GuildID(), gbwis->SlotID, gbwis->Quantity);
}
GuildBankAck();
break;
}
case GuildBankMergeStacks:
{
GuildBankWithdrawItem_Struct *gbwis = (GuildBankWithdrawItem_Struct*)app->pBuffer;
GuildBanks->MergeStacks(GuildID(), gbwis->SlotID);
GuildBankAck();
break;
}
default:
{
Message(Chat::Red, "Unexpected GuildBank action.");
LogError("Received unexpected guild bank action code [{}] from [{}]", Action, GetName());
}
}
}
void Client::Handle_OP_GuildCreate(const EQApplicationPacket *app)
{
if (IsInAGuild())
{
Message(Chat::Red, "You are already in a guild!");
return;
}
if (!RuleB(Guild, PlayerCreationAllowed))
{
Message(Chat::Red, "This feature is disabled on this server. Contact a GM or post on your server message boards to create a guild.");
return;
}
if ((Admin() < RuleI(Guild, PlayerCreationRequiredStatus)) ||
(GetLevel() < RuleI(Guild, PlayerCreationRequiredLevel)) ||
(database.GetTotalTimeEntitledOnAccount(AccountID()) < (unsigned int)RuleI(Guild, PlayerCreationRequiredTime)))
{
Message(Chat::Red, "Your status, level or time playing on this account are insufficient to use this feature.");
return;
}
// The Underfoot client Guild Creation window will only allow a guild name of <= around 30 characters, but the packet is 64 bytes. Sanity check the
// name anyway.
//
char *GuildName = (char *)app->pBuffer;
#ifdef DARWIN
#if __DARWIN_C_LEVEL < 200809L
if (strlen(GuildName) > 60)
#else
if (strnlen(GuildName, 64) > 60)
#endif // __DARWIN_C_LEVEL
#else
if (strnlen(GuildName, 64) > 60)
#endif // DARWIN
{
Message(Chat::Red, "Guild name too long.");
return;
}
for (unsigned int i = 0; i < strlen(GuildName); ++i)
{
if (!isalpha(GuildName[i]) && (GuildName[i] != ' '))
{
Message(Chat::Red, "Invalid character in Guild name.");
return;
}
}
int32 GuildCount = guild_mgr.DoesAccountContainAGuildLeader(AccountID());
if (GuildCount >= RuleI(Guild, PlayerCreationLimit))
{
Message(Chat::Red, "You cannot create this guild because this account may only be leader of %i guilds.", RuleI(Guild, PlayerCreationLimit));
return;
}
if (guild_mgr.GetGuildIDByName(GuildName) != GUILD_NONE)
{
MessageString(Chat::Red, GUILD_NAME_IN_USE);
return;
}
uint32 NewGuildID = guild_mgr.CreateGuild(GuildName, CharacterID());
LogGuilds("[{}]: Creating guild [{}] with leader [{}] via UF+ GUI. It was given id [{}]", GetName(),
GuildName, CharacterID(), (unsigned long)NewGuildID);
if (NewGuildID == GUILD_NONE)
Message(Chat::Red, "Guild creation failed.");
else
{
if (!guild_mgr.SetGuild(CharacterID(), NewGuildID, GUILD_LEADER))
Message(Chat::Red, "Unable to set guild leader's guild in the database. Contact a GM.");
else
{
Message(Chat::Yellow, "You are now the leader of %s", GuildName);
if (zone->GetZoneID() == RuleI(World, GuildBankZoneID) && GuildBanks)
GuildBanks->SendGuildBank(this);
SendGuildRanks();
}
}
}
void Client::Handle_OP_GuildDelete(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildDelete");
if (!IsInAGuild() || !guild_mgr.IsGuildLeader(GuildID(), CharacterID()))
Message(0, "You are not a guild leader or not in a guild.");
else {
LogGuilds("Deleting guild [{}] ([{}])", guild_mgr.GetGuildName(GuildID()), GuildID());
if (!guild_mgr.DeleteGuild(GuildID()))
Message(0, "Guild delete failed.");
else {
Message(0, "Guild successfully deleted.");
}
}
}
void Client::Handle_OP_GuildDemote(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildDemote");
if (app->size != sizeof(GuildDemoteStruct)) {
LogGuilds("Error: app size of [{}] != size of GuildDemoteStruct of [{}]\n", app->size, sizeof(GuildDemoteStruct));
return;
}
if (!IsInAGuild())
Message(0, "Error: You arent in a guild!");
else if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_DEMOTE))
Message(0, "You dont have permission to invite.");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
GuildDemoteStruct* demote = (GuildDemoteStruct*)app->pBuffer;
CharGuildInfo gci;
if (!guild_mgr.GetCharInfo(demote->target, gci)) {
Message(0, "Unable to find '%s'", demote->target);
return;
}
if (gci.guild_id != GuildID()) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
if (gci.rank < 1) {
Message(0, "%s cannot be demoted any further!", demote->target);
return;
}
uint8 rank = gci.rank - 1;
LogGuilds("Demoting [{}] ([{}]) from rank [{}] ([{}]) to [{}] ([{}]) in [{}] ([{}])",
demote->target, gci.char_id,
guild_mgr.GetRankName(GuildID(), gci.rank), gci.rank,
guild_mgr.GetRankName(GuildID(), rank), rank,
guild_mgr.GetGuildName(GuildID()), GuildID());
if (!guild_mgr.SetGuildRank(gci.char_id, rank)) {
Message(Chat::Red, "Error while setting rank %d on '%s'.", rank, demote->target);
return;
}
Message(0, "Successfully demoted %s to rank %d", demote->target, rank);
}
// SendGuildMembers(GuildID(), true);
return;
}
void Client::Handle_OP_GuildInvite(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildInvite");
if (app->size != sizeof(GuildCommand_Struct)) {
std::cout << "Wrong size: OP_GuildInvite, size=" << app->size << ", expected " << sizeof(GuildCommand_Struct) << std::endl;
return;
}
GuildCommand_Struct* gc = (GuildCommand_Struct*)app->pBuffer;
if (!IsInAGuild())
Message(0, "Error: You are not in a guild!");
else if (gc->officer > GUILD_MAX_RANK)
Message(Chat::Red, "Invalid rank.");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
//ok, the invite is also used for changing rank as well.
Mob* invitee = entity_list.GetMob(gc->othername);
if (!invitee) {
Message(Chat::Red, "Prospective guild member %s must be in zone to preform guild operations on them.", gc->othername);
return;
}
if (invitee->IsClient()) {
Client* client = invitee->CastToClient();
//ok, figure out what they are trying to do.
if (client->GuildID() == GuildID()) {
//they are already in this guild, must be a promotion or demotion
if (gc->officer < client->GuildRank()) {
//demotion
if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_DEMOTE)) {
Message(Chat::Red, "You dont have permission to demote.");
return;
}
//we could send this to the member and prompt them to see if they want to
//be demoted (I guess), but I dont see a point in that.
LogGuilds("[{}] ([{}]) is demoting [{}] ([{}]) to rank [{}] in guild [{}] ([{}])",
GetName(), CharacterID(),
client->GetName(), client->CharacterID(),
gc->officer,
guild_mgr.GetGuildName(GuildID()), GuildID());
if (!guild_mgr.SetGuildRank(client->CharacterID(), gc->officer)) {
Message(Chat::Red, "There was an error during the demotion, DB may now be inconsistent.");
return;
}
}
else if (gc->officer > client->GuildRank()) {
//promotion
if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_PROMOTE)) {
Message(Chat::Red, "You dont have permission to demote.");
return;
}
LogGuilds("[{}] ([{}]) is asking to promote [{}] ([{}]) to rank [{}] in guild [{}] ([{}])",
GetName(), CharacterID(),
client->GetName(), client->CharacterID(),
gc->officer,
guild_mgr.GetGuildName(GuildID()), GuildID());
//record the promotion with guild manager so we know its valid when we get the reply
guild_mgr.RecordInvite(client->CharacterID(), GuildID(), gc->officer);
if (gc->guildeqid == 0)
gc->guildeqid = GuildID();
LogGuilds("Sending OP_GuildInvite for promotion to [{}], length [{}]", client->GetName(), app->size);
client->QueuePacket(app);
}
else {
Message(Chat::Red, "That member is already that rank.");
return;
}
}
else if (!client->IsInAGuild()) {
//they are not in this or any other guild, this is an invite
//
if (client->GetPendingGuildInvitation())
{
Message(Chat::Red, "That person is already considering a guild invitation.");
return;
}
if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_INVITE)) {
Message(Chat::Red, "You dont have permission to invite.");
return;
}
LogGuilds("Inviting [{}] ([{}]) into guild [{}] ([{}])",
client->GetName(), client->CharacterID(),
guild_mgr.GetGuildName(GuildID()), GuildID());
//record the invite with guild manager so we know its valid when we get the reply
guild_mgr.RecordInvite(client->CharacterID(), GuildID(), gc->officer);
if (gc->guildeqid == 0)
gc->guildeqid = GuildID();
// Convert Membership Level between RoF and previous clients.
if (client->ClientVersion() < EQEmu::versions::ClientVersion::RoF && ClientVersion() >= EQEmu::versions::ClientVersion::RoF)
{
gc->officer = 0;
}
if (client->ClientVersion() >= EQEmu::versions::ClientVersion::RoF && ClientVersion() < EQEmu::versions::ClientVersion::RoF)
{
gc->officer = 8;
}
LogGuilds("Sending OP_GuildInvite for invite to [{}], length [{}]", client->GetName(), app->size);
client->SetPendingGuildInvitation(true);
client->QueuePacket(app);
}
else {
//they are in some other guild
Message(Chat::Red, "Player is in a guild.");
return;
}
}
#ifdef BOTS
else if (invitee->IsBot()) {
// The guild system is too tightly coupled with the character_data table so we have to avoid using much of the system
Bot::ProcessGuildInvite(this, invitee->CastToBot());
return;
}
#endif
}
}
void Client::Handle_OP_GuildInviteAccept(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildInviteAccept");
SetPendingGuildInvitation(false);
if (app->size != sizeof(GuildInviteAccept_Struct)) {
std::cout << "Wrong size: OP_GuildInviteAccept, size=" << app->size << ", expected " << sizeof(GuildJoin_Struct) << std::endl;
return;
}
GuildInviteAccept_Struct* gj = (GuildInviteAccept_Struct*)app->pBuffer;
uint32 guildrank = gj->response;
if (ClientVersion() >= EQEmu::versions::ClientVersion::RoF)
{
if (gj->response > 9)
{
//dont care if the check fails (since we dont know the rank), just want to clear the entry.
guild_mgr.VerifyAndClearInvite(CharacterID(), gj->guildeqid, gj->response);
worldserver.SendEmoteMessage(gj->inviter, 0, 0, "%s has declined to join the guild.", this->GetName());
return;
}
}
if (gj->response == 5 || gj->response == 4) {
//dont care if the check fails (since we dont know the rank), just want to clear the entry.
guild_mgr.VerifyAndClearInvite(CharacterID(), gj->guildeqid, gj->response);
worldserver.SendEmoteMessage(gj->inviter, 0, 0, "%s has declined to join the guild.", this->GetName());
return;
}
//uint32 tmpeq = gj->guildeqid;
if (IsInAGuild() && gj->response == GuildRank())
Message(0, "Error: You're already in a guild!");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
LogGuilds("Guild Invite Accept: guild [{}], response [{}], inviter [{}], person [{}]",
gj->guildeqid, gj->response, gj->inviter, gj->newmember);
//ok, the invite is also used for changing rank as well.
Mob* inviter = entity_list.GetMob(gj->inviter);
if (inviter && inviter->IsClient())
{
Client* client = inviter->CastToClient();
// Convert Membership Level between RoF and previous clients.
if (client->ClientVersion() < EQEmu::versions::ClientVersion::RoF && ClientVersion() >= EQEmu::versions::ClientVersion::RoF)
{
guildrank = 0;
}
if (client->ClientVersion() >= EQEmu::versions::ClientVersion::RoF && ClientVersion() < EQEmu::versions::ClientVersion::RoF)
{
guildrank = 8;
}
}
//we dont really care a lot about what this packet means, as long as
//it has been authorized with the guild manager
if (!guild_mgr.VerifyAndClearInvite(CharacterID(), gj->guildeqid, guildrank)) {
worldserver.SendEmoteMessage(gj->inviter, 0, 0, "%s has sent an invalid response to your invite!", GetName());
Message(Chat::Red, "Invalid invite response packet!");
return;
}
if (gj->guildeqid == GuildID()) {
//only need to change rank.
LogGuilds("Changing guild rank of [{}] ([{}]) to rank [{}] in guild [{}] ([{}])",
GetName(), CharacterID(),
gj->response,
guild_mgr.GetGuildName(GuildID()), GuildID());
if (!guild_mgr.SetGuildRank(CharacterID(), gj->response)) {
Message(Chat::Red, "There was an error during the rank change, DB may now be inconsistent.");
return;
}
}
else {
LogGuilds("Adding [{}] ([{}]) to guild [{}] ([{}]) at rank [{}]",
GetName(), CharacterID(),
guild_mgr.GetGuildName(gj->guildeqid), gj->guildeqid,
gj->response);
//change guild and rank
guildrank = gj->response;
if (ClientVersion() >= EQEmu::versions::ClientVersion::RoF)
{
if (gj->response == 8)
{
guildrank = 0;
}
}
if (!guild_mgr.SetGuild(CharacterID(), gj->guildeqid, guildrank)) {
Message(Chat::Red, "There was an error during the invite, DB may now be inconsistent.");
return;
}
if (zone->GetZoneID() == RuleI(World, GuildBankZoneID) && GuildBanks)
GuildBanks->SendGuildBank(this);
}
}
}
void Client::Handle_OP_GuildLeader(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildLeader");
if (app->size < 2) {
LogGuilds("Invalid length [{}] on OP_GuildLeader", app->size);
return;
}
app->pBuffer[app->size - 1] = 0;
GuildMakeLeader* gml = (GuildMakeLeader*)app->pBuffer;
if (!IsInAGuild())
Message(0, "Error: You arent in a guild!");
else if (GuildRank() != GUILD_LEADER)
Message(0, "Error: You arent the guild leader!");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
//NOTE: we could do cross-zone lookups here...
Client* newleader = entity_list.GetClientByName(gml->target);
if (newleader) {
LogGuilds("Transfering leadership of [{}] ([{}]) to [{}] ([{}])",
guild_mgr.GetGuildName(GuildID()), GuildID(),
newleader->GetName(), newleader->CharacterID());
if (guild_mgr.SetGuildLeader(GuildID(), newleader->CharacterID())) {
Message(0, "Successfully Transfered Leadership to %s.", gml->target);
newleader->Message(Chat::Yellow, "%s has transfered the guild leadership into your hands.", GetName());
}
else
Message(0, "Could not change leadership at this time.");
}
else
Message(0, "Failed to change leader, could not find target.");
}
// SendGuildMembers(GuildID(), true);
return;
}
void Client::Handle_OP_GuildManageBanker(const EQApplicationPacket *app)
{
LogGuilds("Got OP_GuildManageBanker of len [{}]", app->size);
if (app->size != sizeof(GuildManageBanker_Struct)) {
LogGuilds("Error: app size of [{}] != size of OP_GuildManageBanker of [{}]\n", app->size, sizeof(GuildManageBanker_Struct));
return;
}
GuildManageBanker_Struct* gmb = (GuildManageBanker_Struct*)app->pBuffer;
if (!IsInAGuild()) {
Message(Chat::Red, "Your not in a guild!");
return;
}
CharGuildInfo gci;
if (!guild_mgr.GetCharInfo(gmb->member, gci))
{
Message(0, "Unable to find '%s'", gmb->member);
return;
}
bool IsCurrentlyABanker = guild_mgr.GetBankerFlag(gci.char_id);
bool IsCurrentlyAnAlt = guild_mgr.GetAltFlag(gci.char_id);
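// The enabled field is a bitmask: bit 0 toggles the banker flag, bit 1 toggles the alt flag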
bool NewBankerStatus = gmb->enabled & 0x01;
bool NewAltStatus = gmb->enabled & 0x02;
if ((IsCurrentlyABanker != NewBankerStatus) && !guild_mgr.IsGuildLeader(GuildID(), CharacterID()))
{
Message(Chat::Red, "Only the guild leader can assign guild bankers!");
return;
}
if (IsCurrentlyAnAlt != NewAltStatus)
{
bool IsAllowed = !strncasecmp(GetName(), gmb->member, strlen(GetName())) || (GuildRank() >= GUILD_OFFICER);
if (!IsAllowed)
{
Message(Chat::Red, "You are not allowed to change the alt status of %s", gmb->member);
return;
}
}
if (gci.guild_id != GuildID()) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
if (IsCurrentlyABanker != NewBankerStatus)
{
if (!guild_mgr.SetBankerFlag(gci.char_id, NewBankerStatus)) {
Message(Chat::Red, "Error setting guild banker flag.");
return;
}
if (NewBankerStatus)
Message(0, "%s has been made a guild banker.", gmb->member);
else
Message(0, "%s is no longer a guild banker.", gmb->member);
}
if (IsCurrentlyAnAlt != NewAltStatus)
{
if (!guild_mgr.SetAltFlag(gci.char_id, NewAltStatus)) {
Message(Chat::Red, "Error setting guild alt flag.");
return;
}
if (NewAltStatus)
Message(0, "%s has been marked as an alt.", gmb->member);
else
Message(0, "%s is no longer marked as an alt.", gmb->member);
}
}
void Client::Handle_OP_GuildPeace(const EQApplicationPacket *app)
{
LogGuilds("Got OP_GuildPeace of len [{}]", app->size);
return;
}
void Client::Handle_OP_GuildPromote(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildPromote");
if (app->size != sizeof(GuildPromoteStruct)) {
LogGuilds("Error: app size of [{}] != size of GuildDemoteStruct of [{}]\n", app->size, sizeof(GuildPromoteStruct));
return;
}
if (!IsInAGuild())
Message(0, "Error: You arent in a guild!");
else if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_PROMOTE))
Message(0, "You dont have permission to invite.");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
GuildPromoteStruct* promote = (GuildPromoteStruct*)app->pBuffer;
CharGuildInfo gci;
if (!guild_mgr.GetCharInfo(promote->target, gci)) {
Message(0, "Unable to find '%s'", promote->target);
return;
}
if (gci.guild_id != GuildID()) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
uint8 rank = gci.rank + 1;
if (rank > GUILD_OFFICER)
{
Message(0, "You cannot promote someone to be guild leader. You must use /guildleader.");
return;
}
LogGuilds("Promoting [{}] ([{}]) from rank [{}] ([{}]) to [{}] ([{}]) in [{}] ([{}])",
promote->target, gci.char_id,
guild_mgr.GetRankName(GuildID(), gci.rank), gci.rank,
guild_mgr.GetRankName(GuildID(), rank), rank,
guild_mgr.GetGuildName(GuildID()), GuildID());
if (!guild_mgr.SetGuildRank(gci.char_id, rank)) {
Message(Chat::Red, "Error while setting rank %d on '%s'.", rank, promote->target);
return;
}
Message(0, "Successfully promoted %s to rank %d", promote->target, rank);
}
return;
}
void Client::Handle_OP_GuildPublicNote(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildPublicNote");
if (app->size < sizeof(GuildUpdate_PublicNote)) {
// client calls for a motd on login even if they arent in a guild
printf("Error: app size of %i < size of OP_GuildPublicNote of %zu\n", app->size, sizeof(GuildUpdate_PublicNote));
return;
}
GuildUpdate_PublicNote* gpn = (GuildUpdate_PublicNote*)app->pBuffer;
CharGuildInfo gci;
if (!guild_mgr.GetCharInfo(gpn->target, gci)) {
Message(0, "Unable to find '%s'", gpn->target);
return;
}
if (gci.guild_id != GuildID()) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
LogGuilds("Setting public note on [{}] ([{}]) in guild [{}] ([{}]) to: [{}]",
gpn->target, gci.char_id,
guild_mgr.GetGuildName(GuildID()), GuildID(),
gpn->note);
if (!guild_mgr.SetPublicNote(gci.char_id, gpn->note)) {
Message(Chat::Red, "Failed to set public note on %s", gpn->target);
}
else {
Message(0, "Successfully changed public note on %s", gpn->target);
}
// SendGuildMembers(GuildID(), true);
return;
}
void Client::Handle_OP_GuildRemove(const EQApplicationPacket *app)
{
LogGuilds("Received OP_GuildRemove");
if (app->size != sizeof(GuildCommand_Struct)) {
std::cout << "Wrong size: OP_GuildRemove, size=" << app->size << ", expected " << sizeof(GuildCommand_Struct) << std::endl;
return;
}
GuildCommand_Struct* gc = (GuildCommand_Struct*)app->pBuffer;
if (!IsInAGuild())
Message(0, "Error: You arent in a guild!");
// we can always remove ourself, otherwise, our rank needs remove permissions
else if (strcasecmp(gc->othername, GetName()) != 0 &&
!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_REMOVE))
Message(0, "You dont have permission to remove guild members.");
else if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
#ifdef BOTS
if (Bot::ProcessGuildRemoval(this, gc->othername))
return;
#endif
uint32 char_id;
Client* client = entity_list.GetClientByName(gc->othername);
if (client) {
if (!client->IsInGuild(GuildID())) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
char_id = client->CharacterID();
LogGuilds("Removing [{}] ([{}]) from guild [{}] ([{}])",
client->GetName(), client->CharacterID(),
guild_mgr.GetGuildName(GuildID()), GuildID());
}
else {
CharGuildInfo gci;
if (!guild_mgr.GetCharInfo(gc->othername, gci)) {
Message(0, "Unable to find '%s'", gc->othername);
return;
}
if (gci.guild_id != GuildID()) {
Message(0, "You aren't in the same guild, what do you think you are doing?");
return;
}
char_id = gci.char_id;
LogGuilds("Removing remote/offline [{}] ([{}]) into guild [{}] ([{}])",
gci.char_name.c_str(), gci.char_id,
guild_mgr.GetGuildName(GuildID()), GuildID());
}
if (!guild_mgr.SetGuild(char_id, GUILD_NONE, 0)) {
auto outapp = new EQApplicationPacket(OP_GuildManageRemove, sizeof(GuildManageRemove_Struct));
GuildManageRemove_Struct* gm = (GuildManageRemove_Struct*)outapp->pBuffer;
gm->guildeqid = GuildID();
strcpy(gm->member, gc->othername);
Message(0, "%s successfully removed from your guild.", gc->othername);
entity_list.QueueClientsGuild(this, outapp, false, GuildID());
safe_delete(outapp);
}
else
Message(0, "Unable to remove %s from your guild.", gc->othername);
}
// SendGuildMembers(GuildID(), true);
return;
}
void Client::Handle_OP_GuildStatus(const EQApplicationPacket *app)
{
if (app->size != sizeof(GuildStatus_Struct))
{
LogDebug("Size mismatch in OP_GuildStatus expected [{}] got [{}]", sizeof(GuildStatus_Struct), app->size);
DumpPacket(app);
return;
}
GuildStatus_Struct *gss = (GuildStatus_Struct*)app->pBuffer;
Client *c = entity_list.GetClientByName(gss->Name);
if (!c)
{
MessageString(Chat::LightGray, TARGET_PLAYER_FOR_GUILD_STATUS);
return;
}
uint32 TargetGuildID = c->GuildID();
if (TargetGuildID == GUILD_NONE)
{
MessageString(Chat::LightGray, NOT_IN_A_GUILD, c->GetName());
return;
}
const char *GuildName = guild_mgr.GetGuildName(TargetGuildID);
if (!GuildName)
return;
bool IsLeader = guild_mgr.CheckPermission(TargetGuildID, c->GuildRank(), GUILD_PROMOTE);
bool IsOfficer = guild_mgr.CheckPermission(TargetGuildID, c->GuildRank(), GUILD_INVITE);
if ((TargetGuildID == GuildID()) && (c != this))
{
if (IsLeader)
MessageString(Chat::LightGray, LEADER_OF_YOUR_GUILD, c->GetName());
else if (IsOfficer)
MessageString(Chat::LightGray, OFFICER_OF_YOUR_GUILD, c->GetName());
else
MessageString(Chat::LightGray, MEMBER_OF_YOUR_GUILD, c->GetName());
return;
}
if (IsLeader)
MessageString(Chat::LightGray, LEADER_OF_X_GUILD, c->GetName(), GuildName);
else if (IsOfficer)
MessageString(Chat::LightGray, OFFICER_OF_X_GUILD, c->GetName(), GuildName);
else
MessageString(Chat::LightGray, MEMBER_OF_X_GUILD, c->GetName(), GuildName);
}
void Client::Handle_OP_GuildUpdateURLAndChannel(const EQApplicationPacket *app)
{
if (app->size != sizeof(GuildUpdateURLAndChannel_Struct))
{
LogDebug("Size mismatch in OP_GuildUpdateURLAndChannel expected [{}] got [{}]", sizeof(GuildUpdateURLAndChannel_Struct), app->size);
DumpPacket(app);
return;
}
GuildUpdateURLAndChannel_Struct *guuacs = (GuildUpdateURLAndChannel_Struct*)app->pBuffer;
if (!IsInAGuild())
return;
if (!guild_mgr.IsGuildLeader(GuildID(), CharacterID()))
{
Message(Chat::Red, "Only the guild leader can change the Channel or URL.!");
return;
}
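// Action 0 updates the guild URL; any other action updates the guild chat channel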
if (guuacs->Action == 0)
guild_mgr.SetGuildURL(GuildID(), guuacs->Text);
else
guild_mgr.SetGuildChannel(GuildID(), guuacs->Text);
}
void Client::Handle_OP_GuildWar(const EQApplicationPacket *app)
{
LogGuilds("Got OP_GuildWar of len [{}]", app->size);
return;
}
void Client::Handle_OP_Heartbeat(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_Hide(const EQApplicationPacket *app)
{
// newer client respond to OP_CancelSneakHide with OP_Hide with a size of 4 and 0 data
if (app->size == 4) {
auto data = app->ReadUInt32(0);
if (data)
LogDebug("Got OP_Hide with unexpected data [{}]", data);
return;
}
if (!HasSkill(EQEmu::skills::SkillHide) && GetSkill(EQEmu::skills::SkillHide) == 0)
{
//A class may be unable to train Hide but can still have it as a racial ability
return; //You cannot hide if you do not have hide
}
if (!p_timers.Expired(&database, pTimerHide, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
int reuse = HideReuseTime - GetSkillReuseTime(EQEmu::skills::SkillHide);
if (reuse < 1)
reuse = 1;
p_timers.Start(pTimerHide, reuse - 1);
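// Hide chance is 25% base plus skill/250, expressed as a percentage; a maxed skill always beats the 0-100 roll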
float hidechance = ((GetSkill(EQEmu::skills::SkillHide) / 250.0f) + .25) * 100;
float random = zone->random.Real(0, 100);
CheckIncreaseSkill(EQEmu::skills::SkillHide, nullptr, 5);
if (random < hidechance) {
auto outapp = new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
SpawnAppearance_Struct* sa_out = (SpawnAppearance_Struct*)outapp->pBuffer;
sa_out->spawn_id = GetID();
sa_out->type = 0x03;
sa_out->parameter = 1;
entity_list.QueueClients(this, outapp, true);
safe_delete(outapp);
if (spellbonuses.ShroudofStealth || aabonuses.ShroudofStealth || itembonuses.ShroudofStealth) {
improved_hidden = true;
hidden = true;
}
else
hidden = true;
tmHidden = Timer::GetCurrentTime();
}
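// Rogues get an evade attempt when not auto-attacking and the targeted NPC has aggro; otherwise they just get the hide success/fail message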
if (GetClass() == ROGUE) {
auto outapp = new EQApplicationPacket(OP_SimpleMessage, sizeof(SimpleMessage_Struct));
SimpleMessage_Struct *msg = (SimpleMessage_Struct *)outapp->pBuffer;
msg->color = 0x010E;
Mob *evadetar = GetTarget();
if (!auto_attack && (evadetar && evadetar->CheckAggro(this)
&& evadetar->IsNPC())) {
if (zone->random.Int(0, 260) < (int)GetSkill(EQEmu::skills::SkillHide)) {
msg->string_id = EVADE_SUCCESS;
RogueEvade(evadetar);
}
else {
msg->string_id = EVADE_FAIL;
}
}
else {
if (hidden) {
msg->string_id = HIDE_SUCCESS;
}
else {
msg->string_id = HIDE_FAIL;
}
}
FastQueuePacket(&outapp);
}
return;
}
void Client::Handle_OP_HideCorpse(const EQApplicationPacket *app)
{
// New OPCode for SOD+ as /hidecorpse is handled serverside now.
//
if (app->size != sizeof(HideCorpse_Struct))
{
LogDebug("Size mismatch in OP_HideCorpse expected [{}] got [{}]", sizeof(HideCorpse_Struct), app->size);
DumpPacket(app);
return;
}
HideCorpse_Struct *hcs = (HideCorpse_Struct*)app->pBuffer;
if (hcs->Action == HideCorpseLooted)
return;
if ((HideCorpseMode == HideCorpseNone) && (hcs->Action == HideCorpseNone))
return;
entity_list.HideCorpses(this, HideCorpseMode, hcs->Action);
HideCorpseMode = hcs->Action;
}
void Client::Handle_OP_Ignore(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_Illusion(const EQApplicationPacket *app)
{
if (app->size != sizeof(Illusion_Struct)) {
LogError("Received invalid sized OP_Illusion: got [{}], expected [{}]", app->size, sizeof(Illusion_Struct));
DumpPacket(app);
return;
}
if (!GetGM())
{
database.SetMQDetectionFlag(this->AccountName(), this->GetName(), "OP_Illusion sent by non Game Master.", zone->GetShortName());
return;
}
Illusion_Struct* bnpc = (Illusion_Struct*)app->pBuffer;
//these need to be implemented
/*
texture = bnpc->texture;
helmtexture = bnpc->helmtexture;
luclinface = bnpc->luclinface;
*/
race = bnpc->race;
size = 0;
entity_list.QueueClients(this, app);
return;
}
void Client::Handle_OP_InspectAnswer(const EQApplicationPacket *app)
{
if (app->size != sizeof(InspectResponse_Struct)) {
LogError("Wrong size: OP_InspectAnswer, size=[{}], expected [{}]", app->size, sizeof(InspectResponse_Struct));
return;
}
//Fills the app sent from client.
EQApplicationPacket* outapp = app->Copy();
InspectResponse_Struct* insr = (InspectResponse_Struct*)outapp->pBuffer;
Mob* tmp = entity_list.GetMob(insr->TargetID);
const EQEmu::ItemData* item = nullptr;
int ornamentationAugtype = RuleI(Character, OrnamentationAugmentType);
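// Copy each equipped item's name and icon into the inspect response, preferring the ornamentation augment's icon when one is socketed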
for (int16 L = EQEmu::invslot::EQUIPMENT_BEGIN; L <= EQEmu::invslot::EQUIPMENT_END; L++) {
const EQEmu::ItemInstance* inst = GetInv().GetItem(L);
item = inst ? inst->GetItem() : nullptr;
if (item) {
strcpy(insr->itemnames[L], item->Name);
if (inst && inst->GetOrnamentationAug(ornamentationAugtype)) {
const EQEmu::ItemData *aug_item = inst->GetOrnamentationAug(ornamentationAugtype)->GetItem();
insr->itemicons[L] = aug_item->Icon;
}
else if (inst->GetOrnamentationIcon()) {
insr->itemicons[L] = inst->GetOrnamentationIcon();
}
else {
insr->itemicons[L] = item->Icon;
}
}
else { insr->itemicons[L] = 0xFFFFFFFF; }
}
InspectMessage_Struct* newmessage = (InspectMessage_Struct*)insr->text;
InspectMessage_Struct& playermessage = this->GetInspectMessage();
memcpy(&playermessage, newmessage, sizeof(InspectMessage_Struct));
database.SaveCharacterInspectMessage(this->CharacterID(), &playermessage);
if (tmp != 0 && tmp->IsClient()) { tmp->CastToClient()->QueuePacket(outapp); } // Send answer to requester
return;
}
void Client::Handle_OP_InspectMessageUpdate(const EQApplicationPacket *app)
{
if (app->size != sizeof(InspectMessage_Struct)) {
LogError("Wrong size: OP_InspectMessageUpdate, size=[{}], expected [{}]", app->size, sizeof(InspectMessage_Struct));
return;
}
InspectMessage_Struct* newmessage = (InspectMessage_Struct*)app->pBuffer;
InspectMessage_Struct& playermessage = this->GetInspectMessage();
memcpy(&playermessage, newmessage, sizeof(InspectMessage_Struct));
database.SaveCharacterInspectMessage(this->CharacterID(), &playermessage);
}
void Client::Handle_OP_InspectRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(Inspect_Struct)) {
LogError("Wrong size: OP_InspectRequest, size=[{}], expected [{}]", app->size, sizeof(Inspect_Struct));
return;
}
Inspect_Struct* ins = (Inspect_Struct*)app->pBuffer;
Mob* tmp = entity_list.GetMob(ins->TargetID);
if (tmp != 0 && tmp->IsClient()) {
if (tmp->CastToClient()->ClientVersion() < EQEmu::versions::ClientVersion::SoF) { tmp->CastToClient()->QueuePacket(app); } // Send request to target
// Inspecting an SoF or later client will make the server handle the request
else { ProcessInspectRequest(tmp->CastToClient(), this); }
}
#ifdef BOTS
if (tmp != 0 && tmp->IsBot()) { Bot::ProcessBotInspectionRequest(tmp->CastToBot(), this); }
#endif
return;
}
void Client::Handle_OP_InstillDoubt(const EQApplicationPacket *app)
{
//packet is empty as of 12/14/04
if (!p_timers.Expired(&database, pTimerInstillDoubt, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerInstillDoubt, InstillDoubtReuseTime - 1);
InstillDoubt(GetTarget());
return;
}
void Client::Handle_OP_ItemLinkClick(const EQApplicationPacket *app)
{
if (app->size != sizeof(ItemViewRequest_Struct)) {
LogError("Wrong size on OP_ItemLinkClick. Got: [{}], Expected: [{}]", app->size,
sizeof(ItemViewRequest_Struct));
DumpPacket(app);
return;
}
ItemViewRequest_Struct *ivrs = (ItemViewRequest_Struct *)app->pBuffer;
// todo: verify ivrs->link_hash based on a rule, in case we care about people being able to sniff data
// from the item DB
const EQEmu::ItemData *item = database.GetItem(ivrs->item_id);
if (!item) {
if (ivrs->item_id != SAYLINK_ITEM_ID) {
Message(Chat::Red, "Error: The item for the link you have clicked on does not exist!");
return;
}
// This new scheme will shuttle the ID in the first augment for non-silent links
// and the second augment for silent.
std::string response = "";
bool silentsaylink = ivrs->augments[1] > 0 ? true : false;
int sayid = silentsaylink ? ivrs->augments[1] : ivrs->augments[0];
if (sayid > 0) {
std::string query = StringFormat("SELECT `phrase` FROM saylink WHERE `id` = '%i'", sayid);
auto results = database.QueryDatabase(query);
if (!results.Success()) {
Message(Chat::Red, "Error: The saylink (%i) was not found in the database.", sayid);
return;
}
if (results.RowCount() != 1) {
Message(Chat::Red, "Error: The saylink (%i) was not found in the database.", sayid);
return;
}
auto row = results.begin();
response = row[0];
}
if ((response).size() > 0) {
if (!mod_saylink(response, silentsaylink)) {
return;
}
if (GetTarget() && GetTarget()->IsNPC()) {
if (silentsaylink) {
parse->EventNPC(EVENT_SAY, GetTarget()->CastToNPC(), this, response.c_str(), 0);
if (response[0] == '#' && parse->PlayerHasQuestSub(EVENT_COMMAND)) {
parse->EventPlayer(EVENT_COMMAND, this, response.c_str(), 0);
}
#ifdef BOTS
else if (response[0] == '^' && parse->PlayerHasQuestSub(EVENT_BOT_COMMAND)) {
parse->EventPlayer(EVENT_BOT_COMMAND, this, response.c_str(), 0);
}
#endif
else {
parse->EventPlayer(EVENT_SAY, this, response.c_str(), 0);
}
}
else {
Message(Chat::LightGray, "You say, '%s'", response.c_str());
ChannelMessageReceived(8, 0, 100, response.c_str());
}
return;
}
else {
if (silentsaylink) {
if (response[0] == '#' && parse->PlayerHasQuestSub(EVENT_COMMAND)) {
parse->EventPlayer(EVENT_COMMAND, this, response.c_str(), 0);
}
#ifdef BOTS
else if (response[0] == '^' && parse->PlayerHasQuestSub(EVENT_BOT_COMMAND)) {
parse->EventPlayer(EVENT_BOT_COMMAND, this, response.c_str(), 0);
}
#endif
else {
parse->EventPlayer(EVENT_SAY, this, response.c_str(), 0);
}
}
else {
Message(Chat::LightGray, "You say, '%s'", response.c_str());
ChannelMessageReceived(8, 0, 100, response.c_str());
}
return;
}
}
else {
Message(Chat::Red, "Error: Say Link not found or is too long.");
return;
}
}
EQEmu::ItemInstance *inst =
database.CreateItem(item, item->MaxCharges, ivrs->augments[0], ivrs->augments[1], ivrs->augments[2],
ivrs->augments[3], ivrs->augments[4], ivrs->augments[5]);
if (inst) {
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
}
return;
}
void Client::Handle_OP_ItemLinkResponse(const EQApplicationPacket *app)
{
if (app->size != sizeof(LDONItemViewRequest_Struct)) {
LogError("OP size error: OP_ItemLinkResponse expected:[{}] got:[{}]", sizeof(LDONItemViewRequest_Struct), app->size);
return;
}
LDONItemViewRequest_Struct* item = (LDONItemViewRequest_Struct*)app->pBuffer;
EQEmu::ItemInstance* inst = database.CreateItem(item->item_id);
if (inst) {
SendItemPacket(0, inst, ItemPacketViewLink);
safe_delete(inst);
}
return;
}
void Client::Handle_OP_ItemName(const EQApplicationPacket *app)
{
if (app->size != sizeof(ItemNamePacket_Struct)) {
LogError("Invalid size for ItemNamePacket_Struct: Expected: [{}], Got: [{}]",
sizeof(ItemNamePacket_Struct), app->size);
return;
}
ItemNamePacket_Struct *p = (ItemNamePacket_Struct*)app->pBuffer;
const EQEmu::ItemData *item = nullptr;
if ((item = database.GetItem(p->item_id)) != nullptr) {
auto outapp = new EQApplicationPacket(OP_ItemName, sizeof(ItemNamePacket_Struct));
p = (ItemNamePacket_Struct*)outapp->pBuffer;
memset(p, 0, sizeof(ItemNamePacket_Struct));
strcpy(p->name, item->Name);
FastQueuePacket(&outapp);
}
return;
}
void Client::Handle_OP_ItemPreview(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_ItemPreview, app, ItemPreview_Struct);
ItemPreview_Struct *ips = (ItemPreview_Struct *)app->pBuffer;
const EQEmu::ItemData* item = database.GetItem(ips->itemid);
if (item) {
auto outapp = new EQApplicationPacket(OP_ItemPreview, strlen(item->Name) + strlen(item->Lore) +
strlen(item->IDFile) + 898);
int spacer;
for (spacer = 0; spacer < 16; spacer++) {
outapp->WriteUInt8(48);
}
outapp->WriteUInt16(256);
for (spacer = 0; spacer < 7; spacer++) {
outapp->WriteUInt8(0);
}
for (spacer = 0; spacer < 7; spacer++) {
outapp->WriteUInt8(255);
}
outapp->WriteUInt32(0);
outapp->WriteUInt32(1);
outapp->WriteUInt32(0);
outapp->WriteUInt8(237); // Seems to be some kind of counter? increases by 1 for each preview that you do.
outapp->WriteUInt16(2041); //F907
for (spacer = 0; spacer < 36; spacer++) {
outapp->WriteUInt8(0);
}
for (spacer = 0; spacer < 4; spacer++) {
outapp->WriteUInt8(255);
}
for (spacer = 0; spacer < 9; spacer++) {
outapp->WriteUInt8(0);
}
for (spacer = 0; spacer < 5; spacer++) {
outapp->WriteUInt8(255);
}
for (spacer = 0; spacer < 5; spacer++) {
outapp->WriteUInt8(0);
}
outapp->WriteString(item->Name);
outapp->WriteString(item->Lore);
outapp->WriteUInt8(0);
outapp->WriteUInt32(ips->itemid);
outapp->WriteUInt32(item->Weight);
outapp->WriteUInt8(item->NoRent);
outapp->WriteUInt8(item->NoDrop);
outapp->WriteUInt8(item->Attuneable);
outapp->WriteUInt8(item->Size);
outapp->WriteUInt32(item->Slots);
outapp->WriteUInt32(item->Price);
outapp->WriteUInt32(item->Icon);
outapp->WriteUInt8(0); //Unknown?
outapp->WriteUInt8(0); //Placeable flag?
outapp->WriteUInt32(item->BenefitFlag);
outapp->WriteUInt8(item->Tradeskills);
outapp->WriteUInt8(item->CR);
outapp->WriteUInt8(item->DR);
outapp->WriteUInt8(item->PR);
outapp->WriteUInt8(item->MR);
outapp->WriteUInt8(item->FR);
outapp->WriteUInt8(item->AStr);
outapp->WriteUInt8(item->ASta);
outapp->WriteUInt8(item->AAgi);
outapp->WriteUInt8(item->ADex);
outapp->WriteUInt8(item->ACha);
outapp->WriteUInt8(item->AInt);
outapp->WriteUInt8(item->AWis);
outapp->WriteSInt32(item->HP);
outapp->WriteSInt32(item->Mana);
outapp->WriteSInt32(item->Endur);
outapp->WriteSInt32(item->AC);
outapp->WriteUInt32(item->Regen);
outapp->WriteUInt32(item->ManaRegen);
outapp->WriteSInt32(item->EnduranceRegen);
outapp->WriteUInt32(item->Classes);
outapp->WriteUInt32(item->Races);
outapp->WriteUInt32(item->Deity);
outapp->WriteUInt32(item->SkillModValue);
outapp->WriteUInt32(0); //SkillModValue
outapp->WriteUInt32(item->SkillModType);
outapp->WriteUInt32(0); //SkillModExtra
outapp->WriteUInt32(item->BaneDmgRace);
outapp->WriteUInt32(item->BaneDmgBody);
outapp->WriteUInt32(item->BaneDmgRaceAmt);
outapp->WriteUInt32(item->BaneDmgAmt);
outapp->WriteUInt8(item->Magic);
outapp->WriteUInt32(item->CastTime_);
outapp->WriteUInt32(item->ReqLevel);
outapp->WriteUInt32(item->RecLevel);
outapp->WriteUInt32(item->RecSkill);
outapp->WriteUInt32(item->BardType);
outapp->WriteUInt32(item->BardValue);
outapp->WriteUInt8(item->Light);
outapp->WriteUInt8(item->Delay);
outapp->WriteUInt8(item->ElemDmgType);
outapp->WriteUInt8(item->ElemDmgAmt);
outapp->WriteUInt8(item->Range);
outapp->WriteUInt32(item->Damage);
outapp->WriteUInt32(item->Color);
outapp->WriteUInt32(0); // Prestige
outapp->WriteUInt8(item->ItemType);
outapp->WriteUInt32(item->Material);
outapp->WriteUInt32(0); //unknown
outapp->WriteUInt32(item->EliteMaterial);
outapp->WriteUInt32(item->HerosForgeModel);
outapp->WriteUInt32(0); // unknown
outapp->WriteUInt32(0); //This is unknown057 from lucy
for (spacer = 0; spacer < 77; spacer++) { //More Item stats, but some seem to be off based on packet check
outapp->WriteUInt8(0);
}
outapp->WriteUInt32(0xFFFFFFFF); //Unknown but always seen as FF FF FF FF
outapp->WriteUInt32(0); //Unknown
for (spacer = 0; spacer < 6; spacer++) { //Augment stuff
outapp->WriteUInt32(item->AugSlotType[spacer]);
outapp->WriteUInt8(item->AugSlotVisible[spacer]);
outapp->WriteUInt8(item->AugSlotUnk2[spacer]);
}
outapp->WriteUInt32(0); //New RoF 6th Aug Slot
outapp->WriteUInt8(1); //^
outapp->WriteUInt8(0); //^^
outapp->WriteUInt32(item->LDoNSold);
outapp->WriteUInt32(item->LDoNTheme);
outapp->WriteUInt32(item->LDoNPrice);
outapp->WriteUInt32(item->LDoNSellBackRate);
for (spacer = 0; spacer < 11; spacer++) { //unknowns
outapp->WriteUInt8(0);
}
outapp->WriteUInt32(0xFFFFFFFF); //Unknown but always seen as FF FF FF FF
outapp->WriteUInt16(0); //Unknown
outapp->WriteUInt32(item->Favor); // Tribute
for (spacer = 0; spacer < 17; spacer++) { //unknowns
outapp->WriteUInt8(0);
}
outapp->WriteUInt32(item->GuildFavor); // Tribute
outapp->WriteUInt32(0); //Unknown
outapp->WriteUInt32(0xFFFFFFFF); //Unknown but always seen as FF FF FF FF
for (spacer = 0; spacer < 11; spacer++) { //unknowns
outapp->WriteUInt8(0);
}
outapp->WriteUInt8(1);
for (spacer = 0; spacer < 25; spacer++) { //unknowns
outapp->WriteUInt8(0);
}
for (spacer = 0; spacer < 304; spacer++) { //Cast stuff and whole bunch of unknowns
outapp->WriteUInt8(0);
}
outapp->WriteUInt8(142); // Always seen not in the item structure though 8E
outapp->WriteUInt32(0); //unknown
outapp->WriteUInt32(1); // Always seen as 1
outapp->WriteUInt32(0); //unknown
outapp->WriteUInt32(0xCDCCCC3D); // Unknown
outapp->WriteUInt32(0);
outapp->WriteUInt16(8256); //0x4020/8256
outapp->WriteUInt16(0);
outapp->WriteUInt32(0xFFFFFFFF); //Unknown but always seen as FF FF FF FF
outapp->WriteUInt16(0);
outapp->WriteUInt32(0xFFFFFFFF); //Unknown but always seen as FF FF FF FF
outapp->WriteUInt32(0); //unknown
outapp->WriteUInt32(0); //unknown
outapp->WriteUInt16(0); //unknown
outapp->WriteUInt32(32831); //0x3F80
for (spacer = 0; spacer < 24; spacer++) { //whole bunch of unknowns always 0's
outapp->WriteUInt8(0);
}
outapp->WriteUInt8(1);
for (spacer = 0; spacer < 6; spacer++) { //whole bunch of unknowns always 0's
outapp->WriteUInt8(0);
}
QueuePacket(outapp);
safe_delete(outapp);
}
else
return;
}
void Client::Handle_OP_ItemVerifyRequest(const EQApplicationPacket *app)
{
using EQEmu::spells::CastingSlot;
if (app->size != sizeof(ItemVerifyRequest_Struct))
{
LogError("OP size error: OP_ItemVerifyRequest expected:[{}] got:[{}]", sizeof(ItemVerifyRequest_Struct), app->size);
return;
}
ItemVerifyRequest_Struct* request = (ItemVerifyRequest_Struct*)app->pBuffer;
int32 slot_id;
int32 target_id;
int32 spell_id = 0;
slot_id = request->slot;
target_id = request->target;
EQApplicationPacket *outapp = nullptr;
outapp = new EQApplicationPacket(OP_ItemVerifyReply, sizeof(ItemVerifyReply_Struct));
ItemVerifyReply_Struct* reply = (ItemVerifyReply_Struct*)outapp->pBuffer;
reply->slot = slot_id;
reply->target = target_id;
QueuePacket(outapp);
safe_delete(outapp);
if (IsAIControlled()) {
this->MessageString(Chat::Red, NOT_IN_CONTROL);
return;
}
if (slot_id < 0) {
LogDebug("Unknown slot being used by [{}], slot being used is: [{}]", GetName(), request->slot);
return;
}
const EQEmu::ItemInstance* inst = m_inv[slot_id];
if (!inst) {
Message(0, "Error: item not found in inventory slot #%i", slot_id);
DeleteItemInInventory(slot_id, 0, true);
return;
}
const EQEmu::ItemData* item = inst->GetItem();
if (!item) {
Message(0, "Error: item not found in inventory slot #%i", slot_id);
DeleteItemInInventory(slot_id, 0, true);
return;
}
spell_id = item->Click.Effect;
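// Reject the click if the spell is invalid or the client is in any state that blocks casting (already casting, stunned, feared, mezzed, silenced, etc.)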
if
(
spell_id > 0 &&
(
!IsValidSpell(spell_id) ||
casting_spell_id ||
delaytimer ||
spellend_timer.Enabled() ||
IsStunned() ||
IsFeared() ||
IsMezzed() ||
DivineAura() ||
(spells[spell_id].targettype == ST_Ring) ||
(IsSilenced() && !IsDiscipline(spell_id)) ||
(IsAmnesiad() && IsDiscipline(spell_id)) ||
(IsDetrimentalSpell(spell_id) && !zone->CanDoCombat()) ||
(inst->IsScaling() && inst->GetExp() <= 0) // scaling items (charms) have no usable click effect at or below 0 experience
)
)
{
SendSpellBarEnable(spell_id);
return;
}
// Modern clients don't require pet targeted for item clicks that are ST_Pet
if (spell_id > 0 && (spells[spell_id].targettype == ST_Pet || spells[spell_id].targettype == ST_SummonedPet))
target_id = GetPetID();
LogDebug("OP ItemVerifyRequest: spell=[{}], target=[{}], inv=[{}]", spell_id, target_id, slot_id);
if (m_inv.SupportsClickCasting(slot_id) || ((item->ItemType == EQEmu::item::ItemTypePotion || item->PotionBelt) && m_inv.SupportsPotionBeltCasting(slot_id))) // sanity check
{
EQEmu::ItemInstance* p_inst = (EQEmu::ItemInstance*)inst;
parse->EventItem(EVENT_ITEM_CLICK, this, p_inst, nullptr, "", slot_id);
inst = m_inv[slot_id];
if (!inst)
{
return;
}
int r;
bool tryaug = false;
EQEmu::ItemInstance* clickaug = nullptr;
EQEmu::ItemData* augitem = nullptr;
for (r = EQEmu::invaug::SOCKET_BEGIN; r <= EQEmu::invaug::SOCKET_END; r++) {
const EQEmu::ItemInstance* aug_i = inst->GetAugment(r);
if (!aug_i)
continue;
const EQEmu::ItemData* aug = aug_i->GetItem();
if (!aug)
continue;
if ((aug->Click.Type == EQEmu::item::ItemEffectClick) || (aug->Click.Type == EQEmu::item::ItemEffectExpendable) || (aug->Click.Type == EQEmu::item::ItemEffectEquipClick) || (aug->Click.Type == EQEmu::item::ItemEffectClick2))
{
tryaug = true;
clickaug = (EQEmu::ItemInstance*)aug_i;
augitem = (EQEmu::ItemData*)aug;
spell_id = aug->Click.Effect;
break;
}
}
if ((spell_id <= 0) && (item->ItemType != EQEmu::item::ItemTypeFood && item->ItemType != EQEmu::item::ItemTypeDrink && item->ItemType != EQEmu::item::ItemTypeAlcohol && item->ItemType != EQEmu::item::ItemTypeSpell))
{
LogDebug("Item with no effect right clicked by [{}]", GetName());
}
else if (inst->IsClassCommon())
{
if (!RuleB(Skills, RequireTomeHandin) && item->ItemType == EQEmu::item::ItemTypeSpell && (strstr((const char*)item->Name, "Tome of ") || strstr((const char*)item->Name, "Skill: ")))
{
DeleteItemInInventory(slot_id, 1, true);
TrainDiscipline(item->ID);
}
else if (item->ItemType == EQEmu::item::ItemTypeSpell)
{
return;
}
else if ((item->Click.Type == EQEmu::item::ItemEffectClick) || (item->Click.Type == EQEmu::item::ItemEffectExpendable) || (item->Click.Type == EQEmu::item::ItemEffectEquipClick) || (item->Click.Type == EQEmu::item::ItemEffectClick2))
{
if (inst->GetCharges() == 0)
{
//Message(0, "This item is out of charges.");
MessageString(Chat::Red, ITEM_OUT_OF_CHARGES);
return;
}
if (GetLevel() >= item->Click.Level2)
{
int i = parse->EventItem(EVENT_ITEM_CLICK_CAST, this, p_inst, nullptr, "", slot_id);
inst = m_inv[slot_id];
if (!inst)
{
return;
}
if (i == 0) {
if (!IsCastWhileInvis(item->Click.Effect))
CommonBreakInvisible(); // client can't do this for us :(
CastSpell(item->Click.Effect, target_id, CastingSlot::Item, item->CastTime, 0, 0, slot_id);
}
}
else
{
MessageString(Chat::Red, ITEMS_INSUFFICIENT_LEVEL);
return;
}
}
else if (tryaug)
{
if (clickaug->GetCharges() == 0)
{
//Message(0, "This item is out of charges.");
MessageString(Chat::Red, ITEM_OUT_OF_CHARGES);
return;
}
if (GetLevel() >= augitem->Click.Level2)
{
int i = parse->EventItem(EVENT_ITEM_CLICK_CAST, this, clickaug, nullptr, "", slot_id);
inst = m_inv[slot_id];
if (!inst)
{
return;
}
if (i == 0) {
if (!IsCastWhileInvis(augitem->Click.Effect))
CommonBreakInvisible(); // client can't do this for us :(
CastSpell(augitem->Click.Effect, target_id, CastingSlot::Item, augitem->CastTime, 0, 0, slot_id);
}
}
else
{
MessageString(Chat::Red, ITEMS_INSUFFICIENT_LEVEL);
return;
}
}
else
{
if (ClientVersion() >= EQEmu::versions::ClientVersion::SoD && !inst->IsEquipable(GetBaseRace(), GetClass()))
{
if (item->ItemType != EQEmu::item::ItemTypeFood && item->ItemType != EQEmu::item::ItemTypeDrink && item->ItemType != EQEmu::item::ItemTypeAlcohol)
{
LogDebug("Error: unknown item->Click.Type ([{}])", item->Click.Type);
}
else
{
/*
//This is food/drink - consume it
if (item->ItemType == EQEmu::item::ItemTypeFood && m_pp.hunger_level < 5000)
{
Consume(item, item->ItemType, slot_id, false);
}
else if (item->ItemType == EQEmu::item::ItemTypeDrink && m_pp.thirst_level < 5000)
{
Consume(item, item->ItemType, slot_id, false);
}
else if (item->ItemType == EQEmu::item::ItemTypeAlcohol)
{
#if EQDEBUG >= 1
LogDebug("Drinking Alcohol from slot:[{}]", slot_id);
#endif
// This Seems to be handled in OP_DeleteItem handling
//DeleteItemInInventory(slot_id, 1, false);
//entity_list.MessageCloseString(this, true, 50, 0, DRINKING_MESSAGE, GetName(), item->Name);
//Should add intoxication level to the PP at some point
//CheckIncreaseSkill(ALCOHOL_TOLERANCE, nullptr, 25);
}
EQApplicationPacket *outapp2 = nullptr;
outapp2 = new EQApplicationPacket(OP_Stamina, sizeof(Stamina_Struct));
Stamina_Struct* sta = (Stamina_Struct*)outapp2->pBuffer;
if (m_pp.hunger_level > 6000)
sta->food = 6000;
if (m_pp.thirst_level > 6000)
sta->water = 6000;
sta->food = m_pp.hunger_level;
sta->water = m_pp.thirst_level;
QueuePacket(outapp2);
safe_delete(outapp2);
*/
}
}
else
{
LogDebug("Error: unknown item->Click.Type ([{}])", item->Click.Type);
}
}
}
else
{
Message(0, "Error: item not found in inventory slot #%i", slot_id);
}
}
else
{
Message(0, "Error: Invalid inventory slot for using effects (inventory slot #%i)", slot_id);
}
return;
}
void Client::Handle_OP_Jump(const EQApplicationPacket *app)
{
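// Jumping costs endurance: roughly 2.25 per level (integer math) below level 20, otherwise a flat 50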
SetEndurance(GetEndurance() - (GetLevel()<20 ? (225 * GetLevel() / 100) : 50));
return;
}
void Client::Handle_OP_KeyRing(const EQApplicationPacket *app)
{
KeyRingList();
}
void Client::Handle_OP_LDoNButton(const EQApplicationPacket *app)
{
if (app->size < sizeof(bool))
{
return;
}
if (GetPendingAdventureCreate())
{
return;
}
if (IsOnAdventure())
{
return;
}
bool* p = (bool*)app->pBuffer;
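// A true value means the player confirmed the adventure request; forward it to world to create the adventure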
if (*p == true)
{
auto pack =
new ServerPacket(ServerOP_AdventureRequestCreate,
sizeof(ServerAdventureRequestCreate_Struct) + (64 * adv_requested_member_count));
ServerAdventureRequestCreate_Struct *sac = (ServerAdventureRequestCreate_Struct*)pack->pBuffer;
strcpy(sac->leader, GetName());
sac->id = adv_requested_id;
sac->theme = adv_requested_theme;
sac->member_count = adv_requested_member_count;
memcpy((pack->pBuffer + sizeof(ServerAdventureRequestCreate_Struct)), adv_requested_data, (64 * adv_requested_member_count));
worldserver.SendPacket(pack);
delete pack;
PendingAdventureCreate();
ClearPendingAdventureData();
}
else
{
ClearPendingAdventureData();
}
}
void Client::Handle_OP_LDoNDisarmTraps(const EQApplicationPacket *app)
{
Mob * target = GetTarget();
if (target && target->IsNPC())
{
if (HasSkill(EQEmu::skills::SkillDisarmTraps))
{
if (DistanceSquaredNoZ(m_Position, target->GetPosition()) > RuleI(Adventure, LDoNTrapDistanceUse))
{
Message(Chat::Red, "%s is too far away.", target->GetCleanName());
return;
}
HandleLDoNDisarm(target->CastToNPC(), GetSkill(EQEmu::skills::SkillDisarmTraps), LDoNTypeMechanical);
}
else
Message(Chat::Red, "You do not have the disarm trap skill.");
}
}
void Client::Handle_OP_LDoNInspect(const EQApplicationPacket *app)
{
Mob * target = GetTarget();
if (target && target->GetClass() == LDON_TREASURE)
Message(Chat::Yellow, "%s", target->GetCleanName());
}
void Client::Handle_OP_LDoNOpen(const EQApplicationPacket *app)
{
Mob * target = GetTarget();
if (target && target->IsNPC())
HandleLDoNOpen(target->CastToNPC());
}
void Client::Handle_OP_LDoNPickLock(const EQApplicationPacket *app)
{
Mob * target = GetTarget();
if (target && target->IsNPC())
{
if (HasSkill(EQEmu::skills::SkillPickLock))
{
if (DistanceSquaredNoZ(m_Position, target->GetPosition()) > RuleI(Adventure, LDoNTrapDistanceUse))
{
Message(Chat::Red, "%s is too far away.", target->GetCleanName());
return;
}
HandleLDoNPickLock(target->CastToNPC(), GetSkill(EQEmu::skills::SkillPickLock), LDoNTypeMechanical);
}
else
Message(Chat::Red, "You do not have the pick locks skill.");
}
}
void Client::Handle_OP_LDoNSenseTraps(const EQApplicationPacket *app)
{
Mob * target = GetTarget();
if (target && target->IsNPC())
{
if (HasSkill(EQEmu::skills::SkillSenseTraps))
{
if (DistanceSquaredNoZ(m_Position, target->GetPosition()) > RuleI(Adventure, LDoNTrapDistanceUse))
{
Message(Chat::Red, "%s is too far away.", target->GetCleanName());
return;
}
HandleLDoNSenseTraps(target->CastToNPC(), GetSkill(EQEmu::skills::SkillSenseTraps), LDoNTypeMechanical);
}
else
Message(Chat::Red, "You do not have the sense traps skill.");
}
}
void Client::Handle_OP_LeadershipExpToggle(const EQApplicationPacket *app)
{
if (app->size != 1) {
LogDebug("Size mismatch in OP_LeadershipExpToggle expected [{}] got [{}]", 1, app->size);
DumpPacket(app);
return;
}
uint8 *mode = (uint8 *)app->pBuffer;
if (*mode) {
m_pp.leadAAActive = 1;
Save();
MessageString(Chat::Yellow, LEADERSHIP_EXP_ON);
}
else {
m_pp.leadAAActive = 0;
Save();
MessageString(Chat::Yellow, LEADERSHIP_EXP_OFF);
}
}
void Client::Handle_OP_LeaveAdventure(const EQApplicationPacket *app)
{
if (!IsOnAdventure())
{
return;
}
LeaveAdventure();
}
void Client::Handle_OP_LeaveBoat(const EQApplicationPacket *app)
{
Mob* boat = entity_list.GetMob(this->controlling_boat_id); // find the mob corresponding to the boat id
if (boat) {
if ((boat->GetTarget() == this) && boat->GetHateAmount(this) == 0) { // if the client somehow left while still controlling the boat (and the boat isn't attacking them)
boat->SetTarget(nullptr); // fix it to stop later problems
}
}
this->controlling_boat_id = 0;
return;
}
void Client::Handle_OP_LFGCommand(const EQApplicationPacket *app)
{
if (app->size != sizeof(LFG_Struct)) {
std::cout << "Wrong size on OP_LFGCommand. Got: " << app->size << ", Expected: " << sizeof(LFG_Struct) << std::endl;
DumpPacket(app);
return;
}
// Process incoming packet
LFG_Struct* lfg = (LFG_Struct*)app->pBuffer;
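// Only the low byte of value carries the LFG toggle (0 = off, 1 = on)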
switch (lfg->value & 0xFF) {
case 0:
if (LFG) {
database.SetLFG(CharacterID(), false);
LFG = false;
LFGComments[0] = '\0';
}
break;
case 1:
if (!LFG) {
LFG = true;
database.SetLFG(CharacterID(), true);
}
LFGFromLevel = lfg->FromLevel;
LFGToLevel = lfg->ToLevel;
LFGMatchFilter = lfg->MatchFilter;
strcpy(LFGComments, lfg->Comments);
break;
default:
Message(0, "Error: unknown LFG value %i", lfg->value);
}
UpdateWho();
// Issue outgoing packet to notify other clients
auto outapp = new EQApplicationPacket(OP_LFGAppearance, sizeof(LFG_Appearance_Struct));
LFG_Appearance_Struct* lfga = (LFG_Appearance_Struct*)outapp->pBuffer;
lfga->spawn_id = this->GetID();
lfga->lfg = (uint8)LFG;
entity_list.QueueClients(this, outapp, true);
safe_delete(outapp);
return;
}
void Client::Handle_OP_LFGGetMatchesRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(LFGGetMatchesRequest_Struct)) {
LogError("Wrong size: OP_LFGGetMatchesRequest, size=[{}], expected [{}]", app->size, sizeof(LFGGetMatchesRequest_Struct));
DumpPacket(app);
return;
}
LFGGetMatchesRequest_Struct* gmrs = (LFGGetMatchesRequest_Struct*)app->pBuffer;
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
auto pack = new ServerPacket(ServerOP_LFGMatches, sizeof(ServerLFGMatchesRequest_Struct));
ServerLFGMatchesRequest_Struct* smrs = (ServerLFGMatchesRequest_Struct*)pack->pBuffer;
smrs->FromID = GetID();
smrs->QuerierLevel = GetLevel();
strcpy(smrs->FromName, GetName());
smrs->FromLevel = gmrs->FromLevel;
smrs->ToLevel = gmrs->ToLevel;
smrs->Classes = gmrs->Classes;
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
void Client::Handle_OP_LFGuild(const EQApplicationPacket *app)
{
if (app->size < 4)
return;
uint32 Command = *((uint32 *)app->pBuffer);
switch (Command)
{
case 0:
{
VERIFY_PACKET_LENGTH(OP_LFGuild, app, LFGuild_PlayerToggle_Struct);
LFGuild_PlayerToggle_Struct *pts = (LFGuild_PlayerToggle_Struct *)app->pBuffer;
#ifdef DARWIN
#if __DARWIN_C_LEVEL < 200809L
if (strlen(pts->Comment) > 256)
#else
if (strnlen(pts->Comment, 256) > 256)
#endif // __DARWIN_C_LEVEL
#else
if (strnlen(pts->Comment, 256) > 256)
#endif // DARWIN
return;
auto pack = new ServerPacket(ServerOP_QueryServGeneric, strlen(GetName()) + strlen(pts->Comment) + 38);
pack->WriteUInt32(zone->GetZoneID());
pack->WriteUInt32(zone->GetInstanceID());
pack->WriteString(GetName());
pack->WriteUInt32(QSG_LFGuild);
pack->WriteUInt32(QSG_LFGuild_UpdatePlayerInfo);
pack->WriteUInt32(GetBaseClass());
pack->WriteUInt32(GetLevel());
pack->WriteUInt32(GetSpentAA());
pack->WriteString(pts->Comment);
pack->WriteUInt32(pts->Toggle);
pack->WriteUInt32(pts->TimeZone);
worldserver.SendPacket(pack);
safe_delete(pack);
break;
}
case 1:
{
VERIFY_PACKET_LENGTH(OP_LFGuild, app, LFGuild_GuildToggle_Struct);
LFGuild_GuildToggle_Struct *gts = (LFGuild_GuildToggle_Struct *)app->pBuffer;
#ifdef DARWIN
#if __DARWIN_C_LEVEL < 200809L
if (strlen(gts->Comment) > 256)
#else
if (strnlen(gts->Comment, 256) > 256)
#endif // __DARWIN_C_LEVEL
#else
if (strnlen(gts->Comment, 256) > 256)
#endif // __DARWIN
return;
auto pack =
new ServerPacket(ServerOP_QueryServGeneric, strlen(GetName()) + strlen(gts->Comment) +
strlen(guild_mgr.GetGuildName(GuildID())) + 43);
pack->WriteUInt32(zone->GetZoneID());
pack->WriteUInt32(zone->GetInstanceID());
pack->WriteString(GetName());
pack->WriteUInt32(QSG_LFGuild);
pack->WriteUInt32(QSG_LFGuild_UpdateGuildInfo);
pack->WriteString(guild_mgr.GetGuildName(GuildID()));
pack->WriteString(gts->Comment);
pack->WriteUInt32(gts->FromLevel);
pack->WriteUInt32(gts->ToLevel);
pack->WriteUInt32(gts->Classes);
pack->WriteUInt32(gts->AACount);
pack->WriteUInt32(gts->Toggle);
pack->WriteUInt32(gts->TimeZone);
worldserver.SendPacket(pack);
safe_delete(pack);
break;
}
case 3:
{
VERIFY_PACKET_LENGTH(OP_LFGuild, app, LFGuild_SearchPlayer_Struct);
auto pack = new ServerPacket(ServerOP_QueryServGeneric, strlen(GetName()) + 37);
pack->WriteUInt32(zone->GetZoneID());
pack->WriteUInt32(zone->GetInstanceID());
pack->WriteString(GetName());
pack->WriteUInt32(QSG_LFGuild);
pack->WriteUInt32(QSG_LFGuild_PlayerMatches);
LFGuild_SearchPlayer_Struct *sps = (LFGuild_SearchPlayer_Struct *)app->pBuffer;
pack->WriteUInt32(sps->FromLevel);
pack->WriteUInt32(sps->ToLevel);
pack->WriteUInt32(sps->MinAA);
pack->WriteUInt32(sps->TimeZone);
pack->WriteUInt32(sps->Classes);
worldserver.SendPacket(pack);
safe_delete(pack);
break;
}
case 4:
{
VERIFY_PACKET_LENGTH(OP_LFGuild, app, LFGuild_SearchGuild_Struct);
auto pack = new ServerPacket(ServerOP_QueryServGeneric, strlen(GetName()) + 33);
pack->WriteUInt32(zone->GetZoneID());
pack->WriteUInt32(zone->GetInstanceID());
pack->WriteString(GetName());
pack->WriteUInt32(QSG_LFGuild);
pack->WriteUInt32(QSG_LFGuild_GuildMatches);
LFGuild_SearchGuild_Struct *sgs = (LFGuild_SearchGuild_Struct *)app->pBuffer;
pack->WriteUInt32(sgs->Level);
pack->WriteUInt32(sgs->AAPoints);
pack->WriteUInt32(sgs->TimeZone);
pack->WriteUInt32(sgs->Class);
worldserver.SendPacket(pack);
safe_delete(pack);
break;
}
default:
break;
}
}
void Client::Handle_OP_LFPCommand(const EQApplicationPacket *app)
{
if (app->size != sizeof(LFP_Struct)) {
LogError("Wrong size: OP_LFPCommand, size=[{}], expected [{}]", app->size, sizeof(LFP_Struct));
DumpPacket(app);
return;
}
LFP_Struct *lfp = (LFP_Struct*)app->pBuffer;
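// Any action other than LFPOff turns LFP on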
LFP = lfp->Action != LFPOff;
database.SetLFP(CharacterID(), LFP);
if (!LFP) {
worldserver.StopLFP(CharacterID());
return;
}
GroupLFPMemberEntry LFPMembers[MAX_GROUP_MEMBERS];
for (unsigned int i = 0; i<MAX_GROUP_MEMBERS; i++) {
LFPMembers[i].Name[0] = '\0';
LFPMembers[i].Class = 0;
LFPMembers[i].Level = 0;
LFPMembers[i].Zone = 0;
LFPMembers[i].GuildID = 0xFFFF;
}
Group *g = GetGroup();
// Slot 0 is always for the group leader, or the player if not in a group
strcpy(LFPMembers[0].Name, GetName());
LFPMembers[0].Class = GetClass();
LFPMembers[0].Level = GetLevel();
LFPMembers[0].Zone = zone->GetZoneID();
LFPMembers[0].GuildID = GuildID();
if (g) {
// This should not happen. The client checks if you are in a group and will not let you put LFP on if
// you are not the leader.
if (!g->IsLeader(this)) {
LogError("Client sent LFP on for character [{}] who is grouped but not leader", GetName());
return;
}
// Fill the LFPMembers array with the rest of the group members, excluding ourself
// We don't fill in the class, level or zone, because we may not be able to determine
// them if the other group members are not in this zone. World will fill in this information
// for us, if it can.
int NextFreeSlot = 1;
for (unsigned int i = 0; i < MAX_GROUP_MEMBERS; i++) {
if (strcasecmp(g->membername[i], LFPMembers[0].Name))
strcpy(LFPMembers[NextFreeSlot++].Name, g->membername[i]);
}
}
worldserver.UpdateLFP(CharacterID(), lfp->Action, lfp->MatchFilter, lfp->FromLevel, lfp->ToLevel, lfp->Classes,
lfp->Comments, LFPMembers);
}
void Client::Handle_OP_LFPGetMatchesRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(LFPGetMatchesRequest_Struct)) {
LogError("Wrong size: OP_LFPGetMatchesRequest, size=[{}], expected [{}]", app->size, sizeof(LFPGetMatchesRequest_Struct));
DumpPacket(app);
return;
}
LFPGetMatchesRequest_Struct* gmrs = (LFPGetMatchesRequest_Struct*)app->pBuffer;
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
auto pack = new ServerPacket(ServerOP_LFPMatches, sizeof(ServerLFPMatchesRequest_Struct));
ServerLFPMatchesRequest_Struct* smrs = (ServerLFPMatchesRequest_Struct*)pack->pBuffer;
smrs->FromID = GetID();
smrs->FromLevel = gmrs->FromLevel;
smrs->ToLevel = gmrs->ToLevel;
smrs->QuerierLevel = GetLevel();
smrs->QuerierClass = GetClass();
strcpy(smrs->FromName, GetName());
worldserver.SendPacket(pack);
safe_delete(pack);
}
return;
}
void Client::Handle_OP_LoadSpellSet(const EQApplicationPacket *app)
{
if (app->size != sizeof(LoadSpellSet_Struct)) {
printf("Wrong size of LoadSpellSet_Struct! Expected: %zu, Got: %i\n", sizeof(LoadSpellSet_Struct), app->size);
return;
}
int i;
LoadSpellSet_Struct* ss = (LoadSpellSet_Struct*)app->pBuffer;
for (i = 0; i < EQEmu::spells::SPELL_GEM_COUNT; i++) {
if (ss->spell[i] != 0xFFFFFFFF)
UnmemSpell(i, true);
}
}
void Client::Handle_OP_Logout(const EQApplicationPacket *app)
{
LogDebug("[{}] sent a logout packet", GetName());
SendLogoutPackets();
auto outapp = new EQApplicationPacket(OP_LogoutReply);
FastQueuePacket(&outapp);
Disconnect();
return;
}
void Client::Handle_OP_LootItem(const EQApplicationPacket *app)
{
if (app->size != sizeof(LootingItem_Struct)) {
LogError("Wrong size: OP_LootItem, size=[{}], expected [{}]", app->size, sizeof(LootingItem_Struct));
return;
}
EQApplicationPacket* outapp = nullptr;
Entity* entity = entity_list.GetID(*((uint16*)app->pBuffer));
if (entity == 0) {
Message(Chat::Red, "Error: OP_LootItem: Corpse not found (ent = 0)");
outapp = new EQApplicationPacket(OP_LootComplete, 0);
QueuePacket(outapp);
safe_delete(outapp);
return;
}
if (entity->IsCorpse()) {
entity->CastToCorpse()->LootItem(this, app);
return;
}
else {
Message(Chat::Red, "Error: Corpse not found! (!ent->IsCorpse())");
Corpse::SendEndLootErrorPacket(this);
}
return;
}
void Client::Handle_OP_LootRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
std::cout << "Wrong size: OP_LootRequest, size=" << app->size << ", expected " << sizeof(uint32) << std::endl;
return;
}
Entity* ent = entity_list.GetID(*((uint32*)app->pBuffer));
if (ent == 0) {
Message(Chat::Red, "Error: OP_LootRequest: Corpse not found (ent = 0)");
Corpse::SendLootReqErrorPacket(this);
return;
}
if (ent->IsCorpse())
{
SetLooting(ent->GetID()); //store the entity we are looting
ent->CastToCorpse()->MakeLootRequestPackets(this, app);
return;
}
else {
std::cout << "npc == 0 LOOTING FOOKED3" << std::endl;
Message(Chat::Red, "Error: OP_LootRequest: Corpse not a corpse?");
Corpse::SendLootReqErrorPacket(this);
}
return;
}
void Client::Handle_OP_ManaChange(const EQApplicationPacket *app)
{
if (app->size == 0) {
// I think that's the signal to stop the songs
if (IsBardSong(casting_spell_id) || bardsong != 0)
InterruptSpell(SONG_ENDS, 0x121);
else
InterruptSpell(INTERRUPT_SPELL, 0x121);
return;
}
else // I don't think the client sends proper manachanges
{ // with a length, just the 0 len ones for stopping songs
//ManaChange_Struct* p = (ManaChange_Struct*)app->pBuffer;
printf("OP_ManaChange from client:\n");
DumpPacket(app);
}
return;
}
/*
#if 0 // I dont think there's an op for this now, and we check this
// when the client is sitting
void Client::Handle_OP_Medding(const EQApplicationPacket *app)
{
if (app->pBuffer[0])
medding = true;
else
medding = false;
return;
}
#endif
*/
void Client::Handle_OP_MemorizeSpell(const EQApplicationPacket *app)
{
OPMemorizeSpell(app);
return;
}
void Client::Handle_OP_Mend(const EQApplicationPacket *app)
{
if (!HasSkill(EQEmu::skills::SkillMend))
return;
if (!p_timers.Expired(&database, pTimerMend, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerMend, MendReuseTime - 1);
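// A successful mend restores a quarter of max HP (doubled on a critical); a failed mend can worsen wounds at low skill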
int mendhp = GetMaxHP() / 4;
int currenthp = GetHP();
if (zone->random.Int(0, 199) < (int)GetSkill(EQEmu::skills::SkillMend)) {
int criticalchance = spellbonuses.CriticalMend + itembonuses.CriticalMend + aabonuses.CriticalMend;
if (zone->random.Int(0, 99) < criticalchance) {
mendhp *= 2;
MessageString(Chat::LightBlue, MEND_CRITICAL);
}
SetHP(GetHP() + mendhp);
SendHPUpdate();
MessageString(Chat::LightBlue, MEND_SUCCESS);
}
else {
/* the purpose of the following is to make the chance to worsen wounds much less common,
which is more consistent with the way eq live works.
according to my math, this should result in the following probability:
0 skill - 25% chance to worsen
20 skill - 23% chance to worsen
50 skill - 16% chance to worsen */
if ((GetSkill(EQEmu::skills::SkillMend) <= 75) && (zone->random.Int(GetSkill(EQEmu::skills::SkillMend), 100) < 75) && (zone->random.Int(1, 3) == 1))
{
SetHP(currenthp > mendhp ? (GetHP() - mendhp) : 1);
SendHPUpdate();
MessageString(Chat::LightBlue, MEND_WORSEN);
}
else
MessageString(Chat::LightBlue, MEND_FAIL);
}
CheckIncreaseSkill(EQEmu::skills::SkillMend, nullptr, 10);
return;
}
void Client::Handle_OP_MercenaryCommand(const EQApplicationPacket *app)
{
if (app->size != sizeof(MercenaryCommand_Struct))
{
Message(Chat::Red, "Size mismatch in OP_MercenaryCommand expected %i got %i", sizeof(MercenaryCommand_Struct), app->size);
LogDebug("Size mismatch in OP_MercenaryCommand expected [{}] got [{}]", sizeof(MercenaryCommand_Struct), app->size);
DumpPacket(app);
return;
}
MercenaryCommand_Struct* mc = (MercenaryCommand_Struct*)app->pBuffer;
uint32 merc_command = mc->MercCommand; // Seen 0 (zone in with no merc or suspended), 1 (dismiss merc), 5 (normal state), 20 (unknown), 36 (zone in with merc)
int32 option = mc->Option; // Seen -1 (zone in with no merc), 0 (setting to passive stance), 1 (normal or setting to balanced stance)
Log(Logs::General, Logs::Mercenaries, "Command %i, Option %i received from %s.", merc_command, option, GetName());
if (!RuleB(Mercs, AllowMercs))
return;
// Handle the Command here...
// Will need a list of what every type of command is supposed to do
// Unsure if there is a server response to this packet
if (option >= 0)
{
Merc* merc = GetMerc();
GetMercInfo().State = option;
if (merc)
{
//get number of available stances for the current merc
std::list<MercStanceInfo> mercStanceList = zone->merc_stance_list[merc->GetMercTemplateID()];
uint8 numStances = static_cast<uint8>(mercStanceList.size());
MercTemplate* mercTemplate = zone->GetMercTemplate(GetMerc()->GetMercTemplateID());
if (mercTemplate)
{
//check to see if selected option is a valid stance slot (option is the slot the stance is in, not the actual stance)
if (option >= 0 && option < numStances)
{
merc->SetStance((EQEmu::constants::StanceType)mercTemplate->Stances[option]);
GetMercInfo().Stance = mercTemplate->Stances[option];
Log(Logs::General, Logs::Mercenaries, "Set Stance: %u for %s (%s)", merc->GetStance(), merc->GetName(), GetName());
}
}
}
}
}
void Client::Handle_OP_MercenaryDataRequest(const EQApplicationPacket *app)
{
// The payload is 4 bytes: the EntityID of the Mercenary Liaison, which is of class 71.
if (app->size != sizeof(MercenaryMerchantShopRequest_Struct))
{
LogDebug("Size mismatch in OP_MercenaryDataRequest expected 4 got [{}]", app->size);
DumpPacket(app);
return;
}
MercenaryMerchantShopRequest_Struct* mmsr = (MercenaryMerchantShopRequest_Struct*)app->pBuffer;
uint32 merchant_id = mmsr->MercMerchantID;
uint32 altCurrentType = 19;
Log(Logs::General, Logs::Mercenaries, "Data Request for Merchant ID (%i) for %s.", merchant_id, GetName());
//client is requesting data about currently owned mercenary
if (merchant_id == 0) {
//send info about your current merc(s)
if (GetMercInfo().mercid)
{
Log(Logs::General, Logs::Mercenaries, "SendMercPersonalInfo Request for %s.", GetName());
SendMercPersonalInfo();
}
else
{
Log(Logs::General, Logs::Mercenaries, "SendMercPersonalInfo Not Sent - MercID (%i) for %s.", GetMercInfo().mercid, GetName());
}
}
if (!RuleB(Mercs, AllowMercs)) {
return;
}
NPC* tar = entity_list.GetNPCByID(merchant_id);
if (tar) {
int mercTypeCount = 0;
int mercCount = 0;
if (DistanceSquared(m_Position, tar->GetPosition()) > USE_NPC_RANGE2)
return;
if (tar->GetClass() != MERCERNARY_MASTER) {
return;
}
mercTypeCount = tar->GetNumMercTypes(static_cast<unsigned int>(ClientVersion()));
mercCount = tar->GetNumMercs(static_cast<unsigned int>(ClientVersion()));
if (mercCount > MAX_MERC)
return;
std::list<MercType> mercTypeList = tar->GetMercTypesList(static_cast<unsigned int>(ClientVersion()));
std::list<MercData> mercDataList = tar->GetMercsList(static_cast<unsigned int>(ClientVersion()));
int i = 0;
int StanceCount = 0;
for (auto mercListItr = mercDataList.begin(); mercListItr != mercDataList.end(); ++mercListItr) {
auto siter = zone->merc_stance_list[mercListItr->MercTemplateID].begin();
for (siter = zone->merc_stance_list[mercListItr->MercTemplateID].begin(); siter != zone->merc_stance_list[mercListItr->MercTemplateID].end(); ++siter)
{
StanceCount++;
}
}
auto outapp = new EQApplicationPacket(OP_MercenaryDataResponse, sizeof(MercenaryMerchantList_Struct));
MercenaryMerchantList_Struct* mml = (MercenaryMerchantList_Struct*)outapp->pBuffer;
mml->MercTypeCount = mercTypeCount;
if (mercTypeCount > 0)
{
for (auto mercTypeListItr = mercTypeList.begin(); mercTypeListItr != mercTypeList.end();
++mercTypeListItr) {
mml->MercGrades[i] = mercTypeListItr->Type; // DBStringID for Type
i++;
}
}
mml->MercCount = mercCount;
if (mercCount > 0)
{
i = 0;
for (auto mercListIter = mercDataList.begin(); mercListIter != mercDataList.end();
++mercListIter) {
mml->Mercs[i].MercID = mercListIter->MercTemplateID;
mml->Mercs[i].MercType = mercListIter->MercType;
mml->Mercs[i].MercSubType = mercListIter->MercSubType;
mml->Mercs[i].PurchaseCost = RuleB(Mercs, ChargeMercPurchaseCost) ? Merc::CalcPurchaseCost(mercListIter->MercTemplateID, GetLevel(), 0) : 0;
mml->Mercs[i].UpkeepCost = RuleB(Mercs, ChargeMercUpkeepCost) ? Merc::CalcUpkeepCost(mercListIter->MercTemplateID, GetLevel(), 0) : 0;
mml->Mercs[i].Status = 0;
mml->Mercs[i].AltCurrencyCost = RuleB(Mercs, ChargeMercPurchaseCost) ? Merc::CalcPurchaseCost(mercListIter->MercTemplateID, GetLevel(), altCurrentType) : 0;
mml->Mercs[i].AltCurrencyUpkeep = RuleB(Mercs, ChargeMercUpkeepCost) ? Merc::CalcUpkeepCost(mercListIter->MercTemplateID, GetLevel(), altCurrentType) : 0;
mml->Mercs[i].AltCurrencyType = altCurrentType;
mml->Mercs[i].MercUnk01 = 0;
mml->Mercs[i].TimeLeft = -1;
mml->Mercs[i].MerchantSlot = i + 1;
mml->Mercs[i].MercUnk02 = 1;
int mercStanceCount = 0;
auto iter = zone->merc_stance_list[mercListIter->MercTemplateID].begin();
for (iter = zone->merc_stance_list[mercListIter->MercTemplateID].begin(); iter != zone->merc_stance_list[mercListIter->MercTemplateID].end(); ++iter)
{
mercStanceCount++;
}
mml->Mercs[i].StanceCount = mercStanceCount;
mml->Mercs[i].MercUnk03 = 519044964;
mml->Mercs[i].MercUnk04 = 1;
//mml->Mercs[i].MercName;
int stanceindex = 0;
if (mercStanceCount > 0)
{
auto iter2 = zone->merc_stance_list[mercListIter->MercTemplateID].begin();
while (iter2 != zone->merc_stance_list[mercListIter->MercTemplateID].end())
{
mml->Mercs[i].Stances[stanceindex].StanceIndex = stanceindex;
mml->Mercs[i].Stances[stanceindex].Stance = (iter2->StanceID);
stanceindex++;
++iter2;
}
}
i++;
}
}
FastQueuePacket(&outapp);
}
}
void Client::Handle_OP_MercenaryDataUpdateRequest(const EQApplicationPacket *app)
{
// The payload is 0 bytes.
if (app->size != 0)
{
Message(Chat::Red, "Size mismatch in OP_MercenaryDataUpdateRequest expected 0 got %i", app->size);
LogDebug("Size mismatch in OP_MercenaryDataUpdateRequest expected 0 got [{}]", app->size);
DumpPacket(app);
return;
}
Log(Logs::General, Logs::Mercenaries, "Data Update Request Received for %s.", GetName());
if (GetMercID())
{
SendMercPersonalInfo();
}
}
void Client::Handle_OP_MercenaryDismiss(const EQApplicationPacket *app)
{
// The payload is 0 or 1 bytes.
if (app->size > 1)
{
Message(Chat::Red, "Size mismatch in OP_MercenaryDismiss expected 0 got %i", app->size);
LogDebug("Size mismatch in OP_MercenaryDismiss expected 0 got [{}]", app->size);
DumpPacket(app);
return;
}
uint8 Command = 0;
if (app->size > 0)
{
char *InBuffer = (char *)app->pBuffer;
Command = VARSTRUCT_DECODE_TYPE(uint8, InBuffer);
}
Log(Logs::General, Logs::Mercenaries, "Dismiss Request ( %i ) Received for %s.", Command, GetName());
// Handle the dismiss here...
DismissMerc(GetMercInfo().mercid);
}
void Client::Handle_OP_MercenaryHire(const EQApplicationPacket *app)
{
// The payload is 16 bytes. First four bytes are the Merc ID (Template ID)
if (app->size != sizeof(MercenaryMerchantRequest_Struct))
{
LogDebug("Size mismatch in OP_MercenaryHire expected [{}] got [{}]", sizeof(MercenaryMerchantRequest_Struct), app->size);
DumpPacket(app);
return;
}
MercenaryMerchantRequest_Struct* mmrq = (MercenaryMerchantRequest_Struct*)app->pBuffer;
uint32 merc_template_id = mmrq->MercID;
uint32 merchant_id = mmrq->MercMerchantID;
uint32 merc_unk1 = mmrq->MercUnk01;
uint32 merc_unk2 = mmrq->MercUnk02;
Log(Logs::General, Logs::Mercenaries, "Template ID (%i), Merchant ID (%i), Unknown1 (%i), Unknown2 (%i), Client: %s", merc_template_id, merchant_id, merc_unk1, merc_unk2, GetName());
//HirePending = true;
SetHoTT(0);
SendTargetCommand(0);
if (!RuleB(Mercs, AllowMercs))
return;
MercTemplate* merc_template = zone->GetMercTemplate(merc_template_id);
if (merc_template)
{
Mob* merchant = entity_list.GetNPCByID(merchant_id);
if (!CheckCanHireMerc(merchant, merc_template_id))
{
return;
}
// Set time remaining to max on Hire
GetMercInfo().MercTimerRemaining = RuleI(Mercs, UpkeepIntervalMS);
// Get merc, assign it to client & spawn
Merc* merc = Merc::LoadMerc(this, merc_template, merchant_id, false);
if (merc)
{
SpawnMerc(merc, true);
merc->Save();
if (RuleB(Mercs, ChargeMercPurchaseCost))
{
uint32 cost = Merc::CalcPurchaseCost(merc_template->MercTemplateID, GetLevel()) * 100; // Cost is in gold
TakeMoneyFromPP(cost, true);
}
// approved hire request
SendMercMerchantResponsePacket(0);
}
else
{
//merc failed to spawn
SendMercMerchantResponsePacket(3);
}
}
else
{
//merc doesn't exist in db
SendMercMerchantResponsePacket(2);
}
}
void Client::Handle_OP_MercenarySuspendRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(SuspendMercenary_Struct))
{
Message(Chat::Red, "Size mismatch in OP_MercenarySuspendRequest expected %i got %i", sizeof(SuspendMercenary_Struct), app->size);
LogDebug("Size mismatch in OP_MercenarySuspendRequest expected [{}] got [{}]", sizeof(SuspendMercenary_Struct), app->size);
DumpPacket(app);
return;
}
SuspendMercenary_Struct* sm = (SuspendMercenary_Struct*)app->pBuffer;
uint32 merc_suspend = sm->SuspendMerc; // Seen 30 for suspending or unsuspending
Log(Logs::General, Logs::Mercenaries, "Suspend ( %i ) received for %s.", merc_suspend, GetName());
if (!RuleB(Mercs, AllowMercs))
return;
// Check if the merc is suspended and if so, unsuspend, otherwise suspend it
SuspendMercCommand();
}
void Client::Handle_OP_MercenaryTimerRequest(const EQApplicationPacket *app)
{
// The payload is 0 bytes.
if (app->size > 1)
{
Message(Chat::Red, "Size mismatch in OP_MercenaryTimerRequest expected 0 got %i", app->size);
LogDebug("Size mismatch in OP_MercenaryTimerRequest expected 0 got [{}]", app->size);
DumpPacket(app);
return;
}
Log(Logs::General, Logs::Mercenaries, "Timer Request received for %s.", GetName());
if (!RuleB(Mercs, AllowMercs)) {
return;
}
// To Do: Load Mercenary Timer Data to properly populate this reply packet
// All values are hard-coded for now
uint32 entityID = 0;
uint32 mercState = 5;
uint32 suspendedTime = 0;
if (GetMercID()) {
Merc* merc = GetMerc();
if (merc) {
entityID = merc->GetID();
if (GetMercInfo().IsSuspended) {
mercState = 1;
suspendedTime = GetMercInfo().SuspendedTime;
}
}
}
if (entityID > 0) {
SendMercTimerPacket(entityID, mercState, suspendedTime, GetMercInfo().MercTimerRemaining, RuleI(Mercs, SuspendIntervalMS));
}
}
void Client::Handle_OP_MoveCoin(const EQApplicationPacket *app)
{
if (app->size != sizeof(MoveCoin_Struct)) {
LogError("Wrong size on OP_MoveCoin. Got: [{}], Expected: [{}]", app->size, sizeof(MoveCoin_Struct));
DumpPacket(app);
return;
}
OPMoveCoin(app);
return;
}
void Client::Handle_OP_MoveItem(const EQApplicationPacket *app)
{
if (!CharacterID())
{
return;
}
if (app->size != sizeof(MoveItem_Struct)) {
LogError("Wrong size: OP_MoveItem, size=[{}], expected [{}]", app->size, sizeof(MoveItem_Struct));
return;
}
MoveItem_Struct* mi = (MoveItem_Struct*)app->pBuffer;
if (spellend_timer.Enabled() && casting_spell_id && !IsBardSong(casting_spell_id))
{
if (mi->from_slot != mi->to_slot && (mi->from_slot <= EQEmu::invslot::GENERAL_END || mi->from_slot > 39) && IsValidSlot(mi->from_slot) && IsValidSlot(mi->to_slot))
{
char *detect = nullptr;
const EQEmu::ItemInstance *itm_from = GetInv().GetItem(mi->from_slot);
const EQEmu::ItemInstance *itm_to = GetInv().GetItem(mi->to_slot);
MakeAnyLenString(&detect, "Player issued a move item from %u(item id %u) to %u(item id %u) while casting %u.",
mi->from_slot,
itm_from ? itm_from->GetID() : 0,
mi->to_slot,
itm_to ? itm_to->GetID() : 0,
casting_spell_id);
database.SetMQDetectionFlag(AccountName(), GetName(), detect, zone->GetShortName());
safe_delete_array(detect);
Kick("Inventory desync"); // Kick client to prevent client and server from getting out-of-sync inventory slots
return;
}
}
// Illegal bagslot usage checks. Currently, user only receives a message if this check is triggered.
bool mi_hack = false;
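// Flag as a hack any reference to a cursor bag sub-slot, or a bag sub-slot whose parent is missing, is not a bag, or has fewer slots than the index requires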
if (mi->from_slot >= EQEmu::invbag::GENERAL_BAGS_BEGIN && mi->from_slot <= EQEmu::invbag::CURSOR_BAG_END) {
if (mi->from_slot >= EQEmu::invbag::CURSOR_BAG_BEGIN) { mi_hack = true; }
else {
int16 from_parent = m_inv.CalcSlotId(mi->from_slot);
if (!m_inv[from_parent]) { mi_hack = true; }
else if (!m_inv[from_parent]->IsClassBag()) { mi_hack = true; }
else if (m_inv.CalcBagIdx(mi->from_slot) >= m_inv[from_parent]->GetItem()->BagSlots) { mi_hack = true; }
}
}
if (mi->to_slot >= EQEmu::invbag::GENERAL_BAGS_BEGIN && mi->to_slot <= EQEmu::invbag::CURSOR_BAG_END) {
if (mi->to_slot >= EQEmu::invbag::CURSOR_BAG_BEGIN) { mi_hack = true; }
else {
int16 to_parent = m_inv.CalcSlotId(mi->to_slot);
if (!m_inv[to_parent]) { mi_hack = true; }
else if (!m_inv[to_parent]->IsClassBag()) { mi_hack = true; }
else if (m_inv.CalcBagIdx(mi->to_slot) >= m_inv[to_parent]->GetItem()->BagSlots) { mi_hack = true; }
}
}
if (mi_hack) { Message(Chat::Yellow, "Caution: Illegal use of inaccessible bag slots!"); }
if (!SwapItem(mi) && IsValidSlot(mi->from_slot) && IsValidSlot(mi->to_slot)) {
SwapItemResync(mi);
bool error = false;
InterrogateInventory(this, false, true, false, error, false);
if (error)
InterrogateInventory(this, true, false, true, error);
}
return;
}
void Client::Handle_OP_MoveMultipleItems(const EQApplicationPacket *app)
{
Kick("Unimplemented move multiple items"); // TODO: lets not desync though
}
void Client::Handle_OP_OpenContainer(const EQApplicationPacket *app)
{
// Does not exist in Ti client
// SoF, SoD and UF clients send a 4-byte packet indicating the 'parent' slot
// SoF, SoD and UF slots are defined by a uint32 value and currently untranslated
// RoF client sends a 12-byte packet based on the RoF::Structs::ItemSlotStruct
// RoF structure types are defined as signed uint16 and currently untranslated
// RoF::struct.SlotType = {0 - Equipment, 1 - Bank, 2 - Shared Bank} // not tested beyond listed types
// RoF::struct.Unknown2 = 0
// RoF::struct.MainSlot = { <parent slot range designated by slottype..zero-based> }
// RoF::struct.SubSlot = -1 (non-child)
// RoF::struct.AugSlot = -1 (non-child)
// RoF::struct.Unknown1 = 141 (unsure why, but always appears to be this value..combine containers not tested)
// SideNote: Watching the slot translations, Unknown1 is showing '141' as well on certain item swaps.
// Manually looting a corpse results in a from '34' to '68' value for equipment items, '0' to '0' for inventory.
}
void Client::Handle_OP_OpenGuildTributeMaster(const EQApplicationPacket *app)
{
LogTribute("Received OP_OpenGuildTributeMaster of length [{}]", app->size);
if (app->size != sizeof(StartTribute_Struct))
printf("Error in OP_OpenGuildTributeMaster. Expected size of: %zu, but got: %i\n", sizeof(StartTribute_Struct), app->size);
else {
//Opens the guild tribute master window
StartTribute_Struct* st = (StartTribute_Struct*)app->pBuffer;
Mob* tribmast = entity_list.GetMob(st->tribute_master_id);
if (tribmast && tribmast->IsNPC() && tribmast->GetClass() == GUILD_TRIBUTE_MASTER
&& DistanceSquared(m_Position, tribmast->GetPosition()) <= USE_NPC_RANGE2) {
st->response = 1;
QueuePacket(app);
tribute_master_id = st->tribute_master_id;
DoTributeUpdate();
}
else {
st->response = 0;
QueuePacket(app);
}
}
return;
}
void Client::Handle_OP_OpenInventory(const EQApplicationPacket *app)
{
// Does not exist in Ti, UF or RoF clients
// SoF and SoD both send a 4-byte packet with a uint32 value of '8'
}
void Client::Handle_OP_OpenTributeMaster(const EQApplicationPacket *app)
{
LogTribute("Received OP_OpenTributeMaster of length [{}]", app->size);
if (app->size != sizeof(StartTribute_Struct))
printf("Error in OP_OpenTributeMaster. Expected size of: %zu, but got: %i\n", sizeof(StartTribute_Struct), app->size);
else {
//Opens the tribute master window
StartTribute_Struct* st = (StartTribute_Struct*)app->pBuffer;
Mob* tribmast = entity_list.GetMob(st->tribute_master_id);
if (tribmast && tribmast->IsNPC() && tribmast->GetClass() == TRIBUTE_MASTER
&& DistanceSquared(m_Position, tribmast->GetPosition()) <= USE_NPC_RANGE2) {
st->response = 1;
QueuePacket(app);
tribute_master_id = st->tribute_master_id;
DoTributeUpdate();
}
else {
st->response = 0;
QueuePacket(app);
}
}
return;
}
void Client::Handle_OP_PDeletePetition(const EQApplicationPacket *app)
{
if (app->size < 2) {
LogError("Wrong size: OP_PDeletePetition, size=[{}], expected [{}]", app->size, 2);
return;
}
if (petition_list.DeletePetitionByCharName((char*)app->pBuffer))
MessageString(Chat::White, PETITION_DELETED);
else
MessageString(Chat::White, PETITION_NO_DELETE);
return;
}
void Client::Handle_OP_PetCommands(const EQApplicationPacket *app)
{
if (app->size != sizeof(PetCommand_Struct)) {
LogError("Wrong size: OP_PetCommands, size=[{}], expected [{}]", app->size, sizeof(PetCommand_Struct));
return;
}
char val1[20] = { 0 };
PetCommand_Struct* pet = (PetCommand_Struct*)app->pBuffer;
Mob* mypet = this->GetPet();
Mob *target = entity_list.GetMob(pet->target);
if (!mypet || pet->command == PET_LEADER) {
if (pet->command == PET_LEADER) {
// we either send the ID of an NPC we're interested in or no ID for our own pet
if (target) {
auto owner = target->GetOwner();
if (owner)
target->SayString(PET_LEADERIS, owner->GetCleanName());
else
target->SayString(I_FOLLOW_NOONE);
} else if (mypet) {
mypet->SayString(PET_LEADERIS, GetName());
}
}
return;
}
if (mypet->GetPetType() == petTargetLock && (pet->command != PET_HEALTHREPORT && pet->command != PET_GETLOST))
return;
// just let the command "/pet get lost" work for familiars
if (mypet->GetPetType() == petFamiliar && pet->command != PET_GETLOST)
return;
uint32 PetCommand = pet->command;
// Handle Sit/Stand toggle in UF and later.
/*
if (GetClientVersion() >= EQClientUnderfoot)
{
if (PetCommand == PET_SITDOWN)
if (mypet->GetPetOrder() == SPO_Sit)
PetCommand = PET_STANDUP;
}
*/
switch (PetCommand)
{
case PET_ATTACK: {
if (!target)
break;
if (target->IsMezzed()) {
MessageString(Chat::NPCQuestSay, CANNOT_WAKE, mypet->GetCleanName(), target->GetCleanName());
break;
}
if (mypet->IsFeared())
break; //prevent pet from attacking stuff while feared
if (!mypet->IsAttackAllowed(target)) {
mypet->SayString(this, NOT_LEGAL_TARGET);
break;
}
// default range is 200, takes Z into account
// really they do something weird where they're added to the aggro list then remove them
// and will attack if they come in range -- too lazy, lets remove exploits for now
if (DistanceSquared(mypet->GetPosition(), target->GetPosition()) >= RuleR(Aggro, PetAttackRange)) {
// they say they're attacking then remove on live ... so they don't really say anything in this case ...
break;
}
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (target != this && DistanceSquaredNoZ(mypet->GetPosition(), target->GetPosition()) <= (RuleR(Pets, AttackCommandRange)*RuleR(Pets, AttackCommandRange))) {
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
if (mypet->IsPetRegroup()) {
mypet->SetPetRegroup(false);
SetPetCommandState(PET_BUTTON_REGROUP, 0);
}
zone->AddAggroMob();
// classic acts like qattack
int hate = 1;
if (mypet->IsEngaged()) {
auto top = mypet->GetHateMost();
if (top && top != target)
hate += mypet->GetHateAmount(top) - mypet->GetHateAmount(target) + 100; // should be enough to cause target change
}
mypet->AddToHateList(target, hate, 0, true, false, false, SPELL_UNKNOWN, true);
MessageString(Chat::PetResponse, PET_ATTACKING, mypet->GetCleanName(), target->GetCleanName());
SetTarget(target);
}
}
break;
}
case PET_QATTACK: {
if (mypet->IsFeared())
break; //prevent pet from attacking stuff while feared
if (!GetTarget())
break;
if (GetTarget()->IsMezzed()) {
MessageString(Chat::NPCQuestSay, CANNOT_WAKE, mypet->GetCleanName(), GetTarget()->GetCleanName());
break;
}
if (!mypet->IsAttackAllowed(GetTarget())) {
mypet->SayString(this, NOT_LEGAL_TARGET);
break;
}
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (GetTarget() != this && DistanceSquaredNoZ(mypet->GetPosition(), GetTarget()->GetPosition()) <= (RuleR(Pets, AttackCommandRange)*RuleR(Pets, AttackCommandRange))) {
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
if (mypet->IsPetRegroup()) {
mypet->SetPetRegroup(false);
SetPetCommandState(PET_BUTTON_REGROUP, 0);
}
zone->AddAggroMob();
mypet->AddToHateList(GetTarget(), 1, 0, true, false, false, SPELL_UNKNOWN, true);
MessageString(Chat::PetResponse, PET_ATTACKING, mypet->GetCleanName(), GetTarget()->GetCleanName());
}
}
break;
}
case PET_BACKOFF: {
if (mypet->IsFeared()) break; //keeps pet running while feared
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SayString(this, Chat::PetResponse, PET_CALMING);
mypet->WipeHateList();
mypet->SetTarget(nullptr);
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
break;
}
case PET_HEALTHREPORT: {
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
MessageString(Chat::PetResponse, PET_REPORT_HP, ConvertArrayF(mypet->GetHPRatio(), val1));
mypet->ShowBuffList(this);
}
break;
}
case PET_GETLOST: {
if (mypet->Charmed())
break;
if (mypet->GetPetType() == petCharmed || !mypet->IsNPC()) {
// eqlive ignores this command
// we could just remove the charm
// and continue
mypet->BuffFadeByEffect(SE_Charm);
break;
}
else {
SetPet(nullptr);
}
mypet->SayString(this, Chat::PetResponse, PET_GETLOST_STRING);
mypet->CastToNPC()->Depop();
//Oddly, the client (Titanium) will still allow "/pet get lost" command despite me adding the code below. If someone can figure that out, you can uncomment this code and use it.
/*
if((mypet->GetPetType() == petAnimation && GetAA(aaAnimationEmpathy) >= 2) || mypet->GetPetType() != petAnimation) {
mypet->SayString(PET_GETLOST_STRING);
mypet->CastToNPC()->Depop();
}
*/
break;
}
case PET_GUARDHERE: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (mypet->IsNPC()) {
mypet->SayString(this, Chat::PetResponse, PET_GUARDINGLIFE);
mypet->SetPetOrder(SPO_Guard);
mypet->CastToNPC()->SaveGuardSpot(mypet->GetPosition());
if (!mypet->GetTarget()) // want them to not twitch if they're chasing something down
mypet->StopNavigation();
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
}
break;
}
case PET_FOLLOWME: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SayString(this, Chat::PetResponse, PET_FOLLOWING);
mypet->SetPetOrder(SPO_Follow);
mypet->SendAppearancePacket(AT_Anim, ANIM_STAND);
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
break;
}
case PET_TAUNT: {
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (mypet->CastToNPC()->IsTaunting())
{
MessageString(Chat::PetResponse, PET_NO_TAUNT);
mypet->CastToNPC()->SetTaunting(false);
}
else
{
MessageString(Chat::PetResponse, PET_DO_TAUNT);
mypet->CastToNPC()->SetTaunting(true);
}
}
break;
}
case PET_TAUNT_ON: {
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
MessageString(Chat::PetResponse, PET_DO_TAUNT);
mypet->CastToNPC()->SetTaunting(true);
}
break;
}
case PET_TAUNT_OFF: {
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
MessageString(Chat::PetResponse, PET_NO_TAUNT);
mypet->CastToNPC()->SetTaunting(false);
}
break;
}
case PET_GUARDME: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SayString(this, Chat::PetResponse, PET_GUARDME_STRING);
mypet->SetPetOrder(SPO_Follow);
mypet->SendAppearancePacket(AT_Anim, ANIM_STAND);
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
break;
}
case PET_SIT: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (mypet->GetPetOrder() == SPO_Sit)
{
mypet->SayString(this, Chat::PetResponse, PET_SIT_STRING);
mypet->SetPetOrder(SPO_Follow);
mypet->SendAppearancePacket(AT_Anim, ANIM_STAND);
}
else
{
mypet->SayString(this, Chat::PetResponse, PET_SIT_STRING);
mypet->SetPetOrder(SPO_Sit);
mypet->SetRunAnimSpeed(0);
if (!mypet->UseBardSpellLogic()) //maybe we can have a bard pet
mypet->InterruptSpell(); // interrupt any spell the pet is casting when ordered to sit
mypet->SendAppearancePacket(AT_Anim, ANIM_SIT);
}
}
break;
}
case PET_STANDUP: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SayString(this, Chat::PetResponse, PET_SIT_STRING);
mypet->SetPetOrder(SPO_Follow);
mypet->SendAppearancePacket(AT_Anim, ANIM_STAND);
}
break;
}
case PET_SITDOWN: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SayString(this, Chat::PetResponse, PET_SIT_STRING);
mypet->SetPetOrder(SPO_Sit);
mypet->SetRunAnimSpeed(0);
if (!mypet->UseBardSpellLogic()) //maybe we can have a bard pet
mypet->InterruptSpell(); // interrupt any spell the pet is casting when ordered to sit
mypet->SendAppearancePacket(AT_Anim, ANIM_SIT);
}
break;
}
case PET_HOLD: {
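// hold and greater-hold (ghold) are mutually exclusive; toggling hold always clears ghold and resets its button state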
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsHeld())
{
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_HOLD_SET_OFF);
mypet->SetHeld(false);
}
else
{
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_HOLD_SET_ON);
if (m_ClientVersionBit & EQEmu::versions::maskUFAndLater)
mypet->SayString(this, Chat::PetResponse, PET_NOW_HOLDING);
else
mypet->SayString(this, Chat::PetResponse, PET_ON_HOLD);
mypet->SetHeld(true);
}
mypet->SetGHeld(false);
SetPetCommandState(PET_BUTTON_GHOLD, 0);
}
break;
}
case PET_HOLD_ON: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC() && !mypet->IsHeld()) {
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_HOLD_SET_ON);
if (m_ClientVersionBit & EQEmu::versions::maskUFAndLater)
mypet->SayString(this, Chat::PetResponse, PET_NOW_HOLDING);
else
mypet->SayString(this, Chat::PetResponse, PET_ON_HOLD);
mypet->SetHeld(true);
mypet->SetGHeld(false);
SetPetCommandState(PET_BUTTON_GHOLD, 0);
}
break;
}
case PET_HOLD_OFF: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC() && mypet->IsHeld()) {
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_HOLD_SET_OFF);
mypet->SetHeld(false);
}
break;
}
case PET_GHOLD: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsGHeld())
{
if (m_ClientVersionBit & EQEmu::versions::maskUFAndLater)
MessageString(Chat::PetResponse, PET_OFF_GHOLD);
mypet->SetGHeld(false);
}
else
{
if (m_ClientVersionBit & EQEmu::versions::maskUFAndLater) {
MessageString(Chat::PetResponse, PET_ON_GHOLD);
mypet->SayString(this, Chat::PetResponse, PET_GHOLD_ON_MSG);
} else {
mypet->SayString(this, Chat::PetResponse, PET_ON_HOLD);
}
mypet->SetGHeld(true);
}
mypet->SetHeld(false);
SetPetCommandState(PET_BUTTON_HOLD, 0);
}
break;
}
case PET_GHOLD_ON: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (m_ClientVersionBit & EQEmu::versions::maskUFAndLater) {
MessageString(Chat::PetResponse, PET_ON_GHOLD);
mypet->SayString(this, Chat::PetResponse, PET_GHOLD_ON_MSG);
} else {
mypet->SayString(this, Chat::PetResponse, PET_ON_HOLD);
}
mypet->SetGHeld(true);
mypet->SetHeld(false);
SetPetCommandState(PET_BUTTON_HOLD, 0);
}
break;
}
case PET_GHOLD_OFF: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC() && mypet->IsGHeld()) {
if (m_ClientVersionBit & EQEmu::versions::maskUFAndLater)
MessageString(Chat::PetResponse, PET_OFF_GHOLD);
mypet->SetGHeld(false);
}
break;
}
case PET_SPELLHOLD: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (mypet->IsNoCast()) {
MessageString(Chat::PetResponse, PET_CASTING);
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_SPELLHOLD_SET_OFF);
mypet->SetNoCast(false);
}
else {
MessageString(Chat::PetResponse, PET_NOT_CASTING);
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_SPELLHOLD_SET_ON);
mypet->SetNoCast(true);
}
}
break;
}
case PET_SPELLHOLD_ON: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (!mypet->IsNoCast()) {
MessageString(Chat::PetResponse, PET_NOT_CASTING);
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_SPELLHOLD_SET_ON);
mypet->SetNoCast(true);
}
}
break;
}
case PET_SPELLHOLD_OFF: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (mypet->IsNoCast()) {
MessageString(Chat::PetResponse, PET_CASTING);
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_SPELLHOLD_SET_OFF);
mypet->SetNoCast(false);
}
}
break;
}
case PET_FOCUS: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (mypet->IsFocused()) {
MessageString(Chat::PetResponse, PET_NOT_FOCUSING);
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_FOCUS_SET_OFF);
mypet->SetFocused(false);
}
else {
MessageString(Chat::PetResponse, PET_NOW_FOCUSING);
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_FOCUS_SET_ON);
mypet->SetFocused(true);
}
}
break;
}
case PET_FOCUS_ON: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (!mypet->IsFocused()) {
MessageString(Chat::PetResponse, PET_NOW_FOCUSING);
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_FOCUS_SET_ON);
mypet->SetFocused(true);
}
}
break;
}
case PET_FOCUS_OFF: {
if (aabonuses.PetCommands[PetCommand] && mypet->IsNPC()) {
if (mypet->IsFeared())
break;
if (mypet->IsFocused()) {
MessageString(Chat::PetResponse, PET_NOT_FOCUSING);
if (m_ClientVersionBit & EQEmu::versions::maskSoDAndLater)
MessageString(Chat::PetResponse, PET_FOCUS_SET_OFF);
mypet->SetFocused(false);
}
}
break;
}
case PET_STOP: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
} else {
mypet->SetPetStop(true);
mypet->StopNavigation();
mypet->SetTarget(nullptr);
if (mypet->IsPetRegroup()) {
mypet->SetPetRegroup(false);
SetPetCommandState(PET_BUTTON_REGROUP, 0);
}
}
mypet->SayString(this, Chat::PetResponse, PET_GETLOST_STRING);
}
break;
}
case PET_STOP_ON: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SetPetStop(true);
mypet->StopNavigation();
mypet->SetTarget(nullptr);
mypet->SayString(this, Chat::PetResponse, PET_GETLOST_STRING);
if (mypet->IsPetRegroup()) {
mypet->SetPetRegroup(false);
SetPetCommandState(PET_BUTTON_REGROUP, 0);
}
}
break;
}
case PET_STOP_OFF: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if ((mypet->GetPetType() == petAnimation && aabonuses.PetCommands[PetCommand]) || mypet->GetPetType() != petAnimation) {
mypet->SetPetStop(false);
mypet->SayString(this, Chat::PetResponse, PET_GETLOST_STRING);
}
break;
}
case PET_REGROUP: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if (aabonuses.PetCommands[PetCommand]) {
if (mypet->IsPetRegroup()) {
mypet->SetPetRegroup(false);
mypet->SayString(this, Chat::PetResponse, PET_OFF_REGROUPING);
} else {
mypet->SetPetRegroup(true);
mypet->SetTarget(nullptr);
mypet->SayString(this, Chat::PetResponse, PET_ON_REGROUPING);
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
}
break;
}
case PET_REGROUP_ON: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if (aabonuses.PetCommands[PetCommand]) {
mypet->SetPetRegroup(true);
mypet->SetTarget(nullptr);
mypet->SayString(this, Chat::PetResponse, PET_ON_REGROUPING);
if (mypet->IsPetStop()) {
mypet->SetPetStop(false);
SetPetCommandState(PET_BUTTON_STOP, 0);
}
}
break;
}
case PET_REGROUP_OFF: {
if (mypet->IsFeared()) break; //could be exploited like PET_BACKOFF
if (aabonuses.PetCommands[PetCommand]) {
mypet->SetPetRegroup(false);
mypet->SayString(this, Chat::PetResponse, PET_OFF_REGROUPING);
}
break;
}
default:
printf("Client attempted to use a unknown pet command:\n");
break;
}
}
void Client::Handle_OP_Petition(const EQApplicationPacket *app)
{
if (app->size <= 1)
return;
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
/*else if(petition_list.FindPetitionByAccountName(this->AccountName()))
{
Message(0,"You already have a petition in queue, you cannot petition again until this one has been responded to or you have deleted the petition.");
return;
}*/
else
{
if (petition_list.FindPetitionByAccountName(AccountName()))
{
Message(0, "You already have a petition in the queue, you must wait for it to be answered or use /deletepetition to delete it.");
return;
}
auto pet = new Petition(CharacterID());
pet->SetAName(this->AccountName());
pet->SetClass(this->GetClass());
pet->SetLevel(this->GetLevel());
pet->SetCName(this->GetName());
pet->SetRace(this->GetRace());
pet->SetLastGM("");
pet->SetPetitionText((char*)app->pBuffer);
pet->SetZone(zone->GetZoneID());
pet->SetUrgency(0);
petition_list.AddPetition(pet);
database.InsertPetitionToDB(pet);
petition_list.UpdateGMQueue();
petition_list.UpdateZoneListQueue();
worldserver.SendEmoteMessage(0, 0, 80, 15, "%s has made a petition. #%i", GetName(), pet->GetID());
}
return;
}
void Client::Handle_OP_PetitionBug(const EQApplicationPacket *app)
{
Message(0, "Petition Bugs are not supported, please use /bug.");
return;
}
void Client::Handle_OP_PetitionCheckIn(const EQApplicationPacket *app)
{
if (app->size != sizeof(Petition_Struct)) {
LogError("Wrong size: OP_PetitionCheckIn, size=[{}], expected [{}]", app->size, sizeof(Petition_Struct));
return;
}
Petition_Struct* inpet = (Petition_Struct*)app->pBuffer;
Petition* pet = petition_list.GetPetitionByID(inpet->petnumber);
if (!pet) {
LogError("OP_PetitionCheckIn: petition [{}] not found", inpet->petnumber);
return;
}
//if (inpet->urgency != pet->GetUrgency())
pet->SetUrgency(inpet->urgency);
pet->SetLastGM(this->GetName());
pet->SetGMText(inpet->gmtext);
pet->SetCheckedOut(false);
petition_list.UpdatePetition(pet);
petition_list.UpdateGMQueue();
petition_list.UpdateZoneListQueue();
return;
}
void Client::Handle_OP_PetitionCheckout(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
std::cout << "Wrong size: OP_PetitionCheckout, size=" << app->size << ", expected " << sizeof(uint32) << std::endl;
return;
}
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
uint32 getpetnum = *((uint32*)app->pBuffer);
Petition* getpet = petition_list.GetPetitionByID(getpetnum);
if (getpet != 0) {
getpet->AddCheckout();
getpet->SetCheckedOut(true);
getpet->SendPetitionToPlayer(this->CastToClient());
petition_list.UpdatePetition(getpet);
petition_list.UpdateGMQueue();
petition_list.UpdateZoneListQueue();
}
}
return;
}
void Client::Handle_OP_PetitionDelete(const EQApplicationPacket *app)
{
if (app->size != sizeof(PetitionUpdate_Struct)) {
LogError("Wrong size: OP_PetitionDelete, size=[{}], expected [{}]", app->size, sizeof(PetitionUpdate_Struct));
return;
}
auto outapp = new EQApplicationPacket(OP_PetitionUpdate, sizeof(PetitionUpdate_Struct));
PetitionUpdate_Struct* pet = (PetitionUpdate_Struct*)outapp->pBuffer;
pet->petnumber = *((int*)app->pBuffer);
pet->color = 0x00;
pet->status = 0xFFFFFFFF;
pet->senttime = 0;
strcpy(pet->accountid, "");
strcpy(pet->gmsenttoo, "");
pet->quetotal = petition_list.GetTotalPetitions();
strcpy(pet->charname, "");
FastQueuePacket(&outapp);
if (petition_list.DeletePetition(pet->petnumber) == -1)
std::cout << "Something is borked with: " << pet->petnumber << std::endl;
petition_list.ClearPetitions();
petition_list.UpdateGMQueue();
petition_list.ReadDatabase();
petition_list.UpdateZoneListQueue();
return;
}
void Client::Handle_OP_PetitionQue(const EQApplicationPacket *app)
{
#ifdef _EQDEBUG
printf("%s looking at petitions..\n", this->GetName());
#endif
return;
}
void Client::Handle_OP_PetitionRefresh(const EQApplicationPacket *app)
{
// Sent when the client repeatedly asks for petition updates.
// Intentionally a no-op: answering every request floods the zone and causes lag,
// so petition updates are pushed on the server's own schedule instead.
return;
}
void Client::Handle_OP_PetitionResolve(const EQApplicationPacket *app)
{
Handle_OP_PetitionDelete(app);
}
void Client::Handle_OP_PetitionUnCheckout(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
std::cout << "Wrong size: OP_PetitionUnCheckout, size=" << app->size << ", expected " << sizeof(uint32) << std::endl;
return;
}
if (!worldserver.Connected())
Message(0, "Error: World server disconnected");
else {
uint32 getpetnum = *((uint32*)app->pBuffer);
Petition* getpet = petition_list.GetPetitionByID(getpetnum);
if (getpet != 0) {
getpet->SetCheckedOut(false);
petition_list.UpdatePetition(getpet);
petition_list.UpdateGMQueue();
petition_list.UpdateZoneListQueue();
}
}
return;
}
void Client::Handle_OP_PlayerStateAdd(const EQApplicationPacket *app)
{
if (app->size != sizeof(PlayerState_Struct)) {
std::cout << "Wrong size: OP_PlayerStateAdd, size=" << app->size << ", expected " << sizeof(PlayerState_Struct) << std::endl;
return;
}
PlayerState_Struct *ps = (PlayerState_Struct *)app->pBuffer;
AddPlayerState(ps->state);
entity_list.QueueClients(this, app, true);
}
void Client::Handle_OP_PlayerStateRemove(const EQApplicationPacket *app)
{
if (app->size != sizeof(PlayerState_Struct)) {
std::cout << "Wrong size: OP_PlayerStateRemove, size=" << app->size << ", expected " << sizeof(PlayerState_Struct) << std::endl;
return;
}
PlayerState_Struct *ps = (PlayerState_Struct *)app->pBuffer;
RemovePlayerState(ps->state);
entity_list.QueueClients(this, app, true);
}
void Client::Handle_OP_PickPocket(const EQApplicationPacket *app)
{
if (app->size != sizeof(PickPocket_Struct))
{
LogError("Size mismatch for Pick Pocket packet");
DumpPacket(app);
return;
}
if (!HasSkill(EQEmu::skills::SkillPickPockets))
{
return;
}
if (!p_timers.Expired(&database, pTimerBeggingPickPocket, false))
{
Message(Chat::Red, "Ability recovery time not yet met.");
database.SetMQDetectionFlag(this->AccountName(), this->GetName(), "OP_PickPocket was sent again too quickly.", zone->GetShortName());
return;
}
PickPocket_Struct* pick_in = (PickPocket_Struct*)app->pBuffer;
Mob* victim = entity_list.GetMob(pick_in->to);
if (!victim)
return;
p_timers.Start(pTimerBeggingPickPocket, 8);
if (victim == this) {
Message(0, "You catch yourself red-handed.");
auto outapp = new EQApplicationPacket(OP_PickPocket, sizeof(sPickPocket_Struct));
sPickPocket_Struct* pick_out = (sPickPocket_Struct*)outapp->pBuffer;
pick_out->coin = 0;
pick_out->from = victim->GetID();
pick_out->to = GetID();
pick_out->myskill = GetSkill(EQEmu::skills::SkillPickPockets);
pick_out->type = 0;
//if we do not send this packet the client will lock up and require the player to relog.
QueuePacket(outapp);
safe_delete(outapp);
}
else if (victim->GetOwnerID()) {
Message(0, "You cannot steal from pets!");
auto outapp = new EQApplicationPacket(OP_PickPocket, sizeof(sPickPocket_Struct));
sPickPocket_Struct* pick_out = (sPickPocket_Struct*)outapp->pBuffer;
pick_out->coin = 0;
pick_out->from = victim->GetID();
pick_out->to = GetID();
pick_out->myskill = GetSkill(EQEmu::skills::SkillPickPockets);
pick_out->type = 0;
//if we do not send this packet the client will lock up and require the player to relog.
QueuePacket(outapp);
safe_delete(outapp);
}
else if (victim->IsNPC()) {
victim->CastToNPC()->PickPocket(this);
}
else {
Message(0, "Stealing from clients not yet supported.");
auto outapp = new EQApplicationPacket(OP_PickPocket, sizeof(sPickPocket_Struct));
sPickPocket_Struct* pick_out = (sPickPocket_Struct*)outapp->pBuffer;
pick_out->coin = 0;
pick_out->from = victim->GetID();
pick_out->to = GetID();
pick_out->myskill = GetSkill(EQEmu::skills::SkillPickPockets);
pick_out->type = 0;
//if we do not send this packet the client will lock up and require the player to relog.
QueuePacket(outapp);
safe_delete(outapp);
}
}
void Client::Handle_OP_PopupResponse(const EQApplicationPacket *app)
{
if (app->size != sizeof(PopupResponse_Struct)) {
LogDebug("Size mismatch in OP_PopupResponse expected [{}] got [{}]", sizeof(PopupResponse_Struct), app->size);
DumpPacket(app);
return;
}
PopupResponse_Struct *popup_response = (PopupResponse_Struct *) app->pBuffer;
/**
* Handle any EQEmu defined popup Ids first
*/
switch (popup_response->popupid) {
case POPUPID_UPDATE_SHOWSTATSWINDOW:
if (GetTarget() && GetTarget()->IsClient()) {
GetTarget()->CastToClient()->SendStatsWindow(this, true);
}
else {
SendStatsWindow(this, true);
}
return;
case EQEmu::popupresponse::MOB_INFO_DISMISS:
this->SetDisplayMobInfoWindow(false);
this->Message(Chat::Yellow, "[DevTools] Window snoozed in this zone...");
break;
default:
break;
}
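// any other popup id is forwarded to the quest scripts as EVENT_POPUP_RESPONSE (player event first, then the targeted NPC if any)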
char buf[16];
sprintf(buf, "%d", popup_response->popupid);
parse->EventPlayer(EVENT_POPUP_RESPONSE, this, buf, 0);
Mob *Target = GetTarget();
if (Target && Target->IsNPC()) {
parse->EventNPC(EVENT_POPUP_RESPONSE, Target->CastToNPC(), this, buf, 0);
}
}
void Client::Handle_OP_PotionBelt(const EQApplicationPacket *app)
{
if (app->size != sizeof(MovePotionToBelt_Struct)) {
LogDebug("Size mismatch in OP_PotionBelt expected [{}] got [{}]", sizeof(MovePotionToBelt_Struct), app->size);
DumpPacket(app);
return;
}
MovePotionToBelt_Struct *mptbs = (MovePotionToBelt_Struct*)app->pBuffer;
if (!EQEmu::ValueWithin(mptbs->SlotNumber, 0U, 3U)) {
LogDebug("Client::Handle_OP_PotionBelt mptbs->SlotNumber out of range");
return;
}
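// Action 0 adds the item to the requested belt slot and saves it to the database; any other action just clears the slot in the player profile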
if (mptbs->Action == 0) {
const EQEmu::ItemData *BaseItem = database.GetItem(mptbs->ItemID);
if (BaseItem) {
m_pp.potionbelt.Items[mptbs->SlotNumber].ID = BaseItem->ID;
m_pp.potionbelt.Items[mptbs->SlotNumber].Icon = BaseItem->Icon;
strn0cpy(m_pp.potionbelt.Items[mptbs->SlotNumber].Name, BaseItem->Name, sizeof(BaseItem->Name));
database.SaveCharacterPotionBelt(this->CharacterID(), mptbs->SlotNumber, m_pp.potionbelt.Items[mptbs->SlotNumber].ID, m_pp.potionbelt.Items[mptbs->SlotNumber].Icon);
}
}
else {
m_pp.potionbelt.Items[mptbs->SlotNumber].ID = 0;
m_pp.potionbelt.Items[mptbs->SlotNumber].Icon = 0;
m_pp.potionbelt.Items[mptbs->SlotNumber].Name[0] = '\0';
}
}
void Client::Handle_OP_PurchaseLeadershipAA(const EQApplicationPacket *app)
{
if (app->size != sizeof(uint32)) {
LogDebug("Size mismatch in OP_LeadershipExpToggle expected [{}] got [{}]", 1, app->size);
DumpPacket(app);
return;
}
uint32 aaid = *((uint32 *)app->pBuffer);
if (aaid >= _maxLeaderAA)
return;
uint32 current_rank = m_pp.leader_abilities.ranks[aaid];
if (current_rank >= MAX_LEADERSHIP_TIERS) {
Message(Chat::Red, "This ability can be trained no further.");
return;
}
uint8 cost = LeadershipAACosts[aaid][current_rank];
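// a cost of 0 marks the end of the ability's cost table, i.e. it cannot be trained further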
if (cost == 0) {
Message(Chat::Red, "This ability can be trained no further.");
return;
}
//TODO: we need to enforce prerequisites
if (aaid >= raidAAMarkNPC) {
//it is a raid ability.
if (cost > m_pp.raid_leadership_points) {
Message(Chat::Red, "You do not have enough points to purchase this ability.");
return;
}
//sell them the ability.
m_pp.raid_leadership_points -= cost;
m_pp.leader_abilities.ranks[aaid]++;
database.SaveCharacterLeadershipAA(this->CharacterID(), &m_pp);
}
else {
//it is a group ability.
if (cost > m_pp.group_leadership_points) {
Message(Chat::Red, "You do not have enough points to purchase this ability.");
return;
}
//sell them the ability.
m_pp.group_leadership_points -= cost;
m_pp.leader_abilities.ranks[aaid]++;
database.SaveCharacterLeadershipAA(this->CharacterID(), &m_pp);
}
//success, send them an update
auto outapp = new EQApplicationPacket(OP_UpdateLeadershipAA, sizeof(UpdateLeadershipAA_Struct));
UpdateLeadershipAA_Struct *u = (UpdateLeadershipAA_Struct *)outapp->pBuffer;
u->ability_id = aaid;
u->new_rank = m_pp.leader_abilities.ranks[aaid];
if (aaid >= raidAAMarkNPC) // raid AA
u->pointsleft = m_pp.raid_leadership_points;
else // group AA
u->pointsleft = m_pp.group_leadership_points;
FastQueuePacket(&outapp);
// Update all group members with the new AA the leader has purchased.
if (IsRaidGrouped()) {
Raid *r = GetRaid();
if (!r)
return;
if (aaid >= raidAAMarkNPC) {
r->UpdateRaidAAs();
r->SendAllRaidLeadershipAA();
}
else {
uint32 gid = r->GetGroup(this);
r->UpdateGroupAAs(gid);
r->GroupUpdate(gid, false);
}
}
else if (IsGrouped()) {
Group *g = GetGroup();
if (!g)
return;
g->UpdateGroupAAs();
g->SendLeadershipAAUpdate();
}
}
void Client::Handle_OP_PVPLeaderBoardDetailsRequest(const EQApplicationPacket *app)
{
// This opcode is sent by the client when the player right clicks a name on the PVP leaderboard to request
// further details about the selected player, e.g. Race/Class/AAs/Guild etc.
//
if (app->size != sizeof(PVPLeaderBoardDetailsRequest_Struct))
{
LogDebug("Size mismatch in OP_PVPLeaderBoardDetailsRequest expected [{}] got [{}]", sizeof(PVPLeaderBoardDetailsRequest_Struct), app->size);
DumpPacket(app);
return;
}
auto outapp = new EQApplicationPacket(OP_PVPLeaderBoardDetailsReply, sizeof(PVPLeaderBoardDetailsReply_Struct));
PVPLeaderBoardDetailsReply_Struct *pvplbdrs = (PVPLeaderBoardDetailsReply_Struct *)outapp->pBuffer;
// TODO: Record and send this data.
QueuePacket(outapp);
safe_delete(outapp);
}
void Client::Handle_OP_PVPLeaderBoardRequest(const EQApplicationPacket *app)
{
// This Opcode is sent by the client when the Leaderboard button on the PVP Stats window is pressed.
//
// It has a single uint32 payload which is the sort method:
//
// PVPSortByKills = 0, PVPSortByPoints = 1, PVPSortByInfamy = 2
//
if (app->size != sizeof(PVPLeaderBoardRequest_Struct))
{
LogDebug("Size mismatch in OP_PVPLeaderBoardRequest expected [{}] got [{}]", sizeof(PVPLeaderBoardRequest_Struct), app->size);
DumpPacket(app);
return;
}
/*PVPLeaderBoardRequest_Struct *pvplbrs = (PVPLeaderBoardRequest_Struct *)app->pBuffer;*/ //unused
auto outapp = new EQApplicationPacket(OP_PVPLeaderBoardReply, sizeof(PVPLeaderBoard_Struct));
/*PVPLeaderBoard_Struct *pvplb = (PVPLeaderBoard_Struct *)outapp->pBuffer;*/ //unused
// TODO: Record and send this data.
QueuePacket(outapp);
safe_delete(outapp);
}
void Client::Handle_OP_QueryUCSServerStatus(const EQApplicationPacket *app)
{
if (zone->IsUCSServerAvailable()) {
EQApplicationPacket* outapp = nullptr;
std::string buffer;
std::string MailKey = database.GetMailKey(CharacterID(), true);
EQEmu::versions::UCSVersion ConnectionType = EQEmu::versions::ucsUnknown;
// chat server packet
switch (ClientVersion()) {
case EQEmu::versions::ClientVersion::Titanium:
ConnectionType = EQEmu::versions::ucsTitaniumChat;
break;
case EQEmu::versions::ClientVersion::SoF:
ConnectionType = EQEmu::versions::ucsSoFCombined;
break;
case EQEmu::versions::ClientVersion::SoD:
ConnectionType = EQEmu::versions::ucsSoDCombined;
break;
case EQEmu::versions::ClientVersion::UF:
ConnectionType = EQEmu::versions::ucsUFCombined;
break;
case EQEmu::versions::ClientVersion::RoF:
ConnectionType = EQEmu::versions::ucsRoFCombined;
break;
case EQEmu::versions::ClientVersion::RoF2:
ConnectionType = EQEmu::versions::ucsRoF2Combined;
break;
default:
ConnectionType = EQEmu::versions::ucsUnknown;
break;
}
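// the client expects "host,port,shortname.charname,<connection type char><mail key>" in OP_SetChatServer / OP_SetChatServer2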
buffer = StringFormat("%s,%i,%s.%s,%c%s",
Config->ChatHost.c_str(),
Config->ChatPort,
Config->ShortName.c_str(),
GetName(),
ConnectionType,
MailKey.c_str()
);
outapp = new EQApplicationPacket(OP_SetChatServer, (buffer.length() + 1));
memcpy(outapp->pBuffer, buffer.c_str(), buffer.length());
outapp->pBuffer[buffer.length()] = '\0';
QueuePacket(outapp);
safe_delete(outapp);
// mail server packet
switch (ClientVersion()) {
case EQEmu::versions::ClientVersion::Titanium:
ConnectionType = EQEmu::versions::ucsTitaniumMail;
break;
default:
// retain value from previous switch
break;
}
buffer = StringFormat("%s,%i,%s.%s,%c%s",
Config->MailHost.c_str(),
Config->MailPort,
Config->ShortName.c_str(),
GetName(),
ConnectionType,
MailKey.c_str()
);
outapp = new EQApplicationPacket(OP_SetChatServer2, (buffer.length() + 1));
memcpy(outapp->pBuffer, buffer.c_str(), buffer.length());
outapp->pBuffer[buffer.length()] = '\0';
QueuePacket(outapp);
safe_delete(outapp);
}
}
void Client::Handle_OP_RaidCommand(const EQApplicationPacket *app)
{
if (app->size < sizeof(RaidGeneral_Struct)) {
LogError("Wrong size: OP_RaidCommand, size=[{}], expected at least [{}]", app->size, sizeof(RaidGeneral_Struct));
DumpPacket(app);
return;
}
RaidGeneral_Struct *raid_command_packet = (RaidGeneral_Struct*)app->pBuffer;
switch (raid_command_packet->action)
{
case RaidCommandInviteIntoExisting:
case RaidCommandInvite: {
Client *player_to_invite = entity_list.GetClientByName(raid_command_packet->player_name);
if (!player_to_invite)
break;
Group *player_to_invite_group = player_to_invite->GetGroup();
if (player_to_invite->HasRaid()) {
Message(Chat::Red, "%s is already in a raid.", player_to_invite->GetName());
break;
}
if (player_to_invite_group && player_to_invite_group->IsGroupMember(this)) {
MessageString(Chat::Red, ALREADY_IN_PARTY);
break;
}
if (player_to_invite_group && !player_to_invite_group->IsLeader(player_to_invite)) {
Message(Chat::Red, "You can only invite an ungrouped player or group leader to join your raid.");
break;
}
/* Send out invite to the client */
auto outapp = new EQApplicationPacket(OP_RaidUpdate, sizeof(RaidGeneral_Struct));
RaidGeneral_Struct *raid_command = (RaidGeneral_Struct*)outapp->pBuffer;
strn0cpy(raid_command->leader_name, raid_command_packet->leader_name, 64);
strn0cpy(raid_command->player_name, raid_command_packet->player_name, 64);
raid_command->parameter = 0;
raid_command->action = 20;
player_to_invite->QueuePacket(outapp);
safe_delete(outapp);
break;
}
case RaidCommandAcceptInvite: {
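// raid-invite acceptance: if the named player is already in a raid, this client (and its group, if any) is folded into that raid;
// otherwise a new raid is created around the named player from whichever groups exist, or from the two ungrouped players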
Client *player_accepting_invite = entity_list.GetClientByName(raid_command_packet->player_name);
if (player_accepting_invite) {
if (IsRaidGrouped()) {
player_accepting_invite->MessageString(Chat::White, ALREADY_IN_RAID, GetName()); //group failed, must invite members not in raid...
return;
}
Raid *raid = entity_list.GetRaidByClient(player_accepting_invite);
if (raid) {
raid->VerifyRaid();
Group *group = GetGroup();
if (group) {
if (group->GroupCount() + raid->RaidCount() > MAX_RAID_MEMBERS) {
player_accepting_invite->Message(Chat::Red, "Invite failed, group invite would create a raid larger than the maximum number of members allowed.");
return;
}
}
else {
if (1 + raid->RaidCount() > MAX_RAID_MEMBERS) {
player_accepting_invite->Message(Chat::Red, "Invite failed, member invite would create a raid larger than the maximum number of members allowed.");
return;
}
}
if (group) {//add us all
uint32 free_group_id = raid->GetFreeGroup();
Client *addClient = nullptr;
for (int x = 0; x < 6; x++) {
if (group->members[x]) {
Client *c = nullptr;
if (group->members[x]->IsClient())
c = group->members[x]->CastToClient();
else
continue;
if (!addClient)
{
addClient = c;
raid->SetGroupLeader(addClient->GetName());
}
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
if (group->IsLeader(group->members[x]))
raid->AddMember(c, free_group_id, false, true);
else
raid->AddMember(c, free_group_id);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
}
group->JoinRaidXTarget(raid);
group->DisbandGroup(true);
raid->GroupUpdate(free_group_id);
}
else {
raid->SendRaidCreate(this);
raid->SendMakeLeaderPacketTo(raid->leadername, this);
raid->AddMember(this);
raid->SendBulkRaid(this);
if (raid->IsLocked()) {
raid->SendRaidLockTo(this);
}
}
}
else
{
Group *player_invited_group = player_accepting_invite->GetGroup();
Group *group = GetGroup();
if (group) //if our target has a group
{
raid = new Raid(player_accepting_invite);
entity_list.AddRaid(raid);
raid->SetRaidDetails();
uint32 raid_free_group_id = raid->GetFreeGroup();
/* If we already have a group then cycle through adding us... */
if (player_invited_group) {
Client *client_to_be_leader = nullptr;
for (int x = 0; x < 6; x++) {
if (player_invited_group->members[x]) {
if (!client_to_be_leader) {
if (player_invited_group->members[x]->IsClient()) {
client_to_be_leader = player_invited_group->members[x]->CastToClient();
raid->SetGroupLeader(client_to_be_leader->GetName());
}
}
if (player_invited_group->IsLeader(player_invited_group->members[x])) {
Client *c = nullptr;
if (player_invited_group->members[x]->IsClient())
c = player_invited_group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, raid_free_group_id, true, true, true);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
else {
Client *c = nullptr;
if (player_invited_group->members[x]->IsClient())
c = player_invited_group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, raid_free_group_id);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
}
}
player_invited_group->JoinRaidXTarget(raid, true);
player_invited_group->DisbandGroup(true);
raid->GroupUpdate(raid_free_group_id);
raid_free_group_id = raid->GetFreeGroup();
}
else {
raid->SendRaidCreate(player_accepting_invite);
raid->AddMember(player_accepting_invite, 0xFFFFFFFF, true, false, true);
}
Client *client_to_add = nullptr;
/* Add client to an existing group */
for (int x = 0; x < 6; x++) {
if (group->members[x]) {
if (!client_to_add) {
if (group->members[x]->IsClient()) {
client_to_add = group->members[x]->CastToClient();
raid->SetGroupLeader(client_to_add->GetName());
}
}
if (group->IsLeader(group->members[x])) {
Client *c = nullptr;
if (group->members[x]->IsClient())
c = group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, raid_free_group_id, false, true);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
else
{
Client *c = nullptr;
if (group->members[x]->IsClient())
c = group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, raid_free_group_id);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
}
}
group->JoinRaidXTarget(raid);
group->DisbandGroup(true);
raid->GroupUpdate(raid_free_group_id);
}
/* Target does not have a group */
else {
if (player_invited_group) {
raid = new Raid(player_accepting_invite);
entity_list.AddRaid(raid);
raid->SetRaidDetails();
Client *addClientig = nullptr;
for (int x = 0; x < 6; x++) {
if (player_invited_group->members[x]) {
if (!addClientig) {
if (player_invited_group->members[x]->IsClient()) {
addClientig = player_invited_group->members[x]->CastToClient();
raid->SetGroupLeader(addClientig->GetName());
}
}
if (player_invited_group->IsLeader(player_invited_group->members[x])) {
Client *c = nullptr;
if (player_invited_group->members[x]->IsClient())
c = player_invited_group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, 0, true, true, true);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
else
{
Client *c = nullptr;
if (player_invited_group->members[x]->IsClient())
c = player_invited_group->members[x]->CastToClient();
else
continue;
raid->SendRaidCreate(c);
raid->SendMakeLeaderPacketTo(raid->leadername, c);
raid->AddMember(c, 0);
raid->SendBulkRaid(c);
if (raid->IsLocked()) {
raid->SendRaidLockTo(c);
}
}
}
}
raid->SendRaidCreate(this);
raid->SendMakeLeaderPacketTo(raid->leadername, this);
raid->SendBulkRaid(this);
player_invited_group->JoinRaidXTarget(raid, true);
raid->AddMember(this);
player_invited_group->DisbandGroup(true);
raid->GroupUpdate(0);
if (raid->IsLocked()) {
raid->SendRaidLockTo(this);
}
}
else { // neither has a group
raid = new Raid(player_accepting_invite);
entity_list.AddRaid(raid);
raid->SetRaidDetails();
raid->SendRaidCreate(player_accepting_invite);
raid->SendRaidCreate(this);
raid->SendMakeLeaderPacketTo(raid->leadername, this);
raid->AddMember(player_accepting_invite, 0xFFFFFFFF, true, false, true);
raid->SendBulkRaid(this);
raid->AddMember(this);
if (raid->IsLocked()) {
raid->SendRaidLockTo(this);
}
}
}
}
}
break;
}
case RaidCommandDisband: {
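// removes the named member from the raid, first promoting a replacement group leader and/or raid leader if they held either role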
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
uint32 group = raid->GetGroup(raid_command_packet->leader_name);
if (group < 12) {
uint32 i = raid->GetPlayerIndex(raid_command_packet->leader_name);
if (raid->members[i].IsGroupLeader) { //assign group leader to someone else
for (int x = 0; x < MAX_RAID_MEMBERS; x++) {
if (strlen(raid->members[x].membername) > 0 && i != x) {
if (raid->members[x].GroupNumber == group) {
raid->SetGroupLeader(raid_command_packet->leader_name, false);
raid->SetGroupLeader(raid->members[x].membername);
raid->UpdateGroupAAs(group);
break;
}
}
}
}
if (raid->members[i].IsRaidLeader) {
for (int x = 0; x < MAX_RAID_MEMBERS; x++) {
if (strlen(raid->members[x].membername) > 0 && strcmp(raid->members[x].membername, raid->members[i].membername) != 0)
{
raid->SetRaidLeader(raid->members[i].membername, raid->members[x].membername);
raid->UpdateRaidAAs();
raid->SendAllRaidLeadershipAA();
break;
}
}
}
}
raid->RemoveMember(raid_command_packet->leader_name);
Client *c = entity_list.GetClientByName(raid_command_packet->leader_name);
if (c)
raid->SendGroupDisband(c);
else {
auto pack =
new ServerPacket(ServerOP_RaidGroupDisband, sizeof(ServerRaidGeneralAction_Struct));
ServerRaidGeneralAction_Struct* rga = (ServerRaidGeneralAction_Struct*)pack->pBuffer;
rga->rid = GetID();
rga->zoneid = zone->GetZoneID();
rga->instance_id = zone->GetInstanceID();
strn0cpy(rga->playername, raid_command_packet->leader_name, 64);
worldserver.SendPacket(pack);
safe_delete(pack);
}
//r->SendRaidGroupRemove(ri->leader_name, grp);
raid->GroupUpdate(group);// break
//}
}
break;
}
case RaidCommandMoveGroup:
{
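// parameter < 12 moves the named member into that raid group (if it has room); anything else moves them to the ungrouped pool;
// group leadership is handed off first when the moved member was leading their old group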
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
/* Moving to group */
if (raid_command_packet->parameter < 12) {
uint8 group_count = raid->GroupCount(raid_command_packet->parameter);
if (group_count < 6) {
Client *c = entity_list.GetClientByName(raid_command_packet->leader_name);
uint32 old_group = raid->GetGroup(raid_command_packet->leader_name);
if (raid_command_packet->parameter == old_group) //don't rejoin grp if we order to join same group.
break;
if (raid->members[raid->GetPlayerIndex(raid_command_packet->leader_name)].IsGroupLeader) {
raid->SetGroupLeader(raid_command_packet->leader_name, false);
/* We were the leader of our old group */
if (old_group < 12) {
/* Assign new group leader if we can */
for (int x = 0; x < MAX_RAID_MEMBERS; x++) {
if (raid->members[x].GroupNumber == old_group) {
if (strcmp(raid_command_packet->leader_name, raid->members[x].membername) != 0 && strlen(raid_command_packet->leader_name) > 0) {
raid->SetGroupLeader(raid->members[x].membername);
raid->UpdateGroupAAs(old_group);
Client *client_to_update = entity_list.GetClientByName(raid->members[x].membername);
if (client_to_update) {
raid->SendRaidRemove(raid->members[x].membername, client_to_update);
raid->SendRaidCreate(client_to_update);
raid->SendMakeLeaderPacketTo(raid->leadername, client_to_update);
raid->SendRaidAdd(raid->members[x].membername, client_to_update);
raid->SendBulkRaid(client_to_update);
if (raid->IsLocked()) {
raid->SendRaidLockTo(client_to_update);
}
}
else {
auto pack = new ServerPacket(ServerOP_RaidChangeGroup, sizeof(ServerRaidGeneralAction_Struct));
ServerRaidGeneralAction_Struct *raid_command_packet = (ServerRaidGeneralAction_Struct*)pack->pBuffer;
raid_command_packet->rid = raid->GetID();
raid_command_packet->zoneid = zone->GetZoneID();
raid_command_packet->instance_id = zone->GetInstanceID();
strn0cpy(raid_command_packet->playername, raid->members[x].membername, 64);
worldserver.SendPacket(pack);
safe_delete(pack);
}
break;
}
}
}
}
}
if (group_count == 0) {
raid->SetGroupLeader(raid_command_packet->leader_name);
raid->UpdateGroupAAs(raid_command_packet->parameter);
}
raid->MoveMember(raid_command_packet->leader_name, raid_command_packet->parameter);
if (c) {
raid->SendGroupDisband(c);
}
else {
auto pack = new ServerPacket(ServerOP_RaidGroupDisband, sizeof(ServerRaidGeneralAction_Struct));
ServerRaidGeneralAction_Struct* raid_command = (ServerRaidGeneralAction_Struct*)pack->pBuffer;
raid_command->rid = raid->GetID();
raid_command->zoneid = zone->GetZoneID();
raid_command->instance_id = zone->GetInstanceID();
strn0cpy(raid_command->playername, raid_command_packet->leader_name, 64);
worldserver.SendPacket(pack);
safe_delete(pack);
}
/* Send group update to our new group */
raid->GroupUpdate(raid_command_packet->parameter);
/* If our old was a group send update there too */
if (old_group < 12)
raid->GroupUpdate(old_group);
}
}
/* Move player to ungrouped bank */
else {
Client *c = entity_list.GetClientByName(raid_command_packet->leader_name);
uint32 oldgrp = raid->GetGroup(raid_command_packet->leader_name);
if (raid->members[raid->GetPlayerIndex(raid_command_packet->leader_name)].IsGroupLeader) {
raid->SetGroupLeader(raid_command_packet->leader_name, false);
for (int x = 0; x < MAX_RAID_MEMBERS; x++) {
if (raid->members[x].GroupNumber == oldgrp && strlen(raid->members[x].membername) > 0 && strcmp(raid->members[x].membername, raid_command_packet->leader_name) != 0){
raid->SetGroupLeader(raid->members[x].membername);
raid->UpdateGroupAAs(oldgrp);
Client *client_leaving_group = entity_list.GetClientByName(raid->members[x].membername);
if (client_leaving_group) {
raid->SendRaidRemove(raid->members[x].membername, client_leaving_group);
raid->SendRaidCreate(client_leaving_group);
raid->SendMakeLeaderPacketTo(raid->leadername, client_leaving_group);
raid->SendRaidAdd(raid->members[x].membername, client_leaving_group);
raid->SendBulkRaid(client_leaving_group);
if (raid->IsLocked()) {
raid->SendRaidLockTo(client_leaving_group);
}
}
else {
auto pack = new ServerPacket( ServerOP_RaidChangeGroup, sizeof(ServerRaidGeneralAction_Struct));
ServerRaidGeneralAction_Struct *raid_command = (ServerRaidGeneralAction_Struct*)pack->pBuffer;
raid_command->rid = raid->GetID();
strn0cpy(raid_command->playername, raid->members[x].membername, 64);
raid_command->zoneid = zone->GetZoneID();
raid_command->instance_id = zone->GetInstanceID();
worldserver.SendPacket(pack);
safe_delete(pack);
}
break;
}
}
}
raid->MoveMember(raid_command_packet->leader_name, 0xFFFFFFFF);
if (c) {
raid->SendGroupDisband(c);
}
else {
auto pack = new ServerPacket(ServerOP_RaidGroupDisband, sizeof(ServerRaidGeneralAction_Struct));
ServerRaidGeneralAction_Struct* raid_command = (ServerRaidGeneralAction_Struct*)pack->pBuffer;
raid_command->rid = raid->GetID();
raid_command->zoneid = zone->GetZoneID();
raid_command->instance_id = zone->GetInstanceID();
strn0cpy(raid_command->playername, raid_command_packet->leader_name, 64);
worldserver.SendPacket(pack);
safe_delete(pack);
}
raid->GroupUpdate(oldgrp);
}
}
Client *client_moved = entity_list.GetClientByName(raid_command_packet->leader_name);
if (client_moved && client_moved->GetRaid()) {
client_moved->GetRaid()->SendHPManaEndPacketsTo(client_moved);
client_moved->GetRaid()->SendHPManaEndPacketsFrom(client_moved);
Log(Logs::General, Logs::HPUpdate,
"Client::Handle_OP_RaidCommand :: %s sending and recieving HP/Mana/End updates",
client_moved->GetCleanName()
);
}
break;
}
case RaidCommandRaidLock:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
if (!raid->IsLocked())
raid->LockRaid(true);
else
raid->SendRaidLockTo(this);
}
break;
}
case RaidCommandRaidUnlock:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid)
{
if (raid->IsLocked())
raid->LockRaid(false);
else
raid->SendRaidUnlockTo(this);
}
break;
}
case RaidCommandLootType2:
case RaidCommandLootType:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
Message(Chat::Yellow, "Loot type changed to: %d.", raid_command_packet->parameter);
raid->ChangeLootType(raid_command_packet->parameter);
}
break;
}
case RaidCommandAddLooter2:
case RaidCommandAddLooter:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
Message(Chat::Yellow, "Adding %s as a raid looter.", raid_command_packet->leader_name);
raid->AddRaidLooter(raid_command_packet->leader_name);
}
break;
}
case RaidCommandRemoveLooter2:
case RaidCommandRemoveLooter:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
Message(Chat::Yellow, "Removing %s as a raid looter.", raid_command_packet->leader_name);
raid->RemoveRaidLooter(raid_command_packet->leader_name);
}
break;
}
case RaidCommandMakeLeader:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (raid) {
if (strcmp(raid->leadername, GetName()) == 0) {
raid->SetRaidLeader(GetName(), raid_command_packet->leader_name);
raid->UpdateRaidAAs();
raid->SendAllRaidLeadershipAA();
}
}
break;
}
case RaidCommandSetMotd:
{
Raid *raid = entity_list.GetRaidByClient(this);
if (!raid)
break;
// we don't use the RaidGeneral here!
RaidMOTD_Struct *motd = (RaidMOTD_Struct *)app->pBuffer;
raid->SetRaidMOTD(std::string(motd->motd));
raid->SaveRaidMOTD();
raid->SendRaidMOTDToWorld();
break;
}
default: {
Message(Chat::Red, "Raid command (%d) NYI", raid_command_packet->action);
break;
}
}
}
void Client::Handle_OP_RandomReq(const EQApplicationPacket *app)
{
if (app->size != sizeof(RandomReq_Struct)) {
LogError("Wrong size: OP_RandomReq, size=[{}], expected [{}]", app->size, sizeof(RandomReq_Struct));
return;
}
const RandomReq_Struct* rndq = (const RandomReq_Struct*)app->pBuffer;
uint32 randLow = rndq->low > rndq->high ? rndq->high : rndq->low;
uint32 randHigh = rndq->low > rndq->high ? rndq->low : rndq->high;
uint32 randResult;
if (randLow == 0 && randHigh == 0)
{ // defaults
randLow = 0;
randHigh = 100;
}
randResult = zone->random.Int(randLow, randHigh);
auto outapp = new EQApplicationPacket(OP_RandomReply, sizeof(RandomReply_Struct));
RandomReply_Struct* rr = (RandomReply_Struct*)outapp->pBuffer;
rr->low = randLow;
rr->high = randHigh;
rr->result = randResult;
strcpy(rr->name, GetName());
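// broadcast the roll result to clients within 400 units of the roller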
entity_list.QueueCloseClients(this, outapp, false, 400);
safe_delete(outapp);
return;
}
void Client::Handle_OP_ReadBook(const EQApplicationPacket *app)
{
if (app->size != sizeof(BookRequest_Struct)) {
LogError("Wrong size: OP_ReadBook, size=[{}], expected [{}]", app->size, sizeof(BookRequest_Struct));
return;
}
BookRequest_Struct* book = (BookRequest_Struct*)app->pBuffer;
ReadBook(book);
if (ClientVersion() >= EQEmu::versions::ClientVersion::SoF)
{
EQApplicationPacket EndOfBook(OP_FinishWindow, 0);
QueuePacket(&EndOfBook);
}
return;
}
void Client::Handle_OP_RecipeAutoCombine(const EQApplicationPacket *app)
{
if (app->size != sizeof(RecipeAutoCombine_Struct)) {
LogError("Invalid size for RecipeAutoCombine_Struct: Expected: [{}], Got: [{}]",
sizeof(RecipeAutoCombine_Struct), app->size);
return;
}
RecipeAutoCombine_Struct* rac = (RecipeAutoCombine_Struct*)app->pBuffer;
Object::HandleAutoCombine(this, rac);
return;
}
void Client::Handle_OP_RecipeDetails(const EQApplicationPacket *app)
{
if (app->size < sizeof(uint32)) {
LogError("Invalid size for RecipeDetails Request: Expected: [{}], Got: [{}]",
sizeof(uint32), app->size);
return;
}
uint32 *recipe_id = (uint32*)app->pBuffer;
SendTradeskillDetails(*recipe_id);
return;
}
void Client::Handle_OP_RecipesFavorite(const EQApplicationPacket *app)
{
if (app->size != sizeof(TradeskillFavorites_Struct)) {
LogError("Invalid size for TradeskillFavorites_Struct: Expected: [{}], Got: [{}]",
sizeof(TradeskillFavorites_Struct), app->size);
return;
}
TradeskillFavorites_Struct* tsf = (TradeskillFavorites_Struct*)app->pBuffer;
LogDebug("Requested Favorites for: [{}] - [{}]\n", tsf->object_type, tsf->some_id);
// results show that object_type is combiner type
// some_id = 0 if world combiner, item number otherwise
// make where clause segment for container(s)
std::string containers;
uint32 combineObjectSlots;
if (tsf->some_id == 0) {
containers += StringFormat(" = %u ", tsf->object_type); // world combiner so no item number
combineObjectSlots = 10;
}
else {
containers += StringFormat(" in (%u, %u) ", tsf->object_type, tsf->some_id); // container in inventory
auto item = database.GetItem(tsf->some_id);
if (!item)
{
LogError("Invalid container ID: [{}]. GetItem returned null. Defaulting to BagSlots = 10.\n", tsf->some_id);
combineObjectSlots = 10;
}
else
{
combineObjectSlots = item->BagSlots;
}
}
std::string favoriteIDs; // comma-separated list of up to 500 favorite recipe IDs
bool first = true;
//Assumes item IDs are <10 characters long
for (uint16 favoriteIndex = 0; favoriteIndex < 500; ++favoriteIndex) {
if (tsf->favorite_recipes[favoriteIndex] == 0)
continue;
if (first) {
favoriteIDs += StringFormat("%u", tsf->favorite_recipes[favoriteIndex]);
first = false;
}
else
favoriteIDs += StringFormat(",%u", tsf->favorite_recipes[favoriteIndex]);
}
if (first) //no favorites....
return;
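// select only the favorite recipes that can actually be combined here: the recipe must list this container, its total
// component count must fit the container's slots, and must_learn flags filter out learnable recipes not yet made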
const std::string query = StringFormat("SELECT tr.id, tr.name, tr.trivial, "
"SUM(tre.componentcount), crl.madecount,tr.tradeskill "
"FROM tradeskill_recipe AS tr "
"LEFT JOIN tradeskill_recipe_entries AS tre ON tr.id=tre.recipe_id "
"LEFT JOIN (SELECT recipe_id, madecount "
"FROM char_recipe_list "
"WHERE char_id = %u) AS crl ON tr.id=crl.recipe_id "
"WHERE tr.enabled <> 0 AND tr.id IN (%s) "
"AND tr.must_learn & 0x20 <> 0x20 AND "
"((tr.must_learn & 0x3 <> 0 AND crl.madecount IS NOT NULL) "
"OR (tr.must_learn & 0x3 = 0)) "
"GROUP BY tr.id "
"HAVING sum(if(tre.item_id %s AND tre.iscontainer > 0,1,0)) > 0 AND SUM(tre.componentcount) <= %u "
"LIMIT 100 ", CharacterID(), favoriteIDs.c_str(), containers.c_str(), combineObjectSlots);
TradeskillSearchResults(query, tsf->object_type, tsf->some_id);
return;
}
void Client::Handle_OP_RecipesSearch(const EQApplicationPacket *app)
{
if (app->size != sizeof(RecipesSearch_Struct)) {
LogError("Invalid size for RecipesSearch_Struct: Expected: [{}], Got: [{}]",
sizeof(RecipesSearch_Struct), app->size);
return;
}
RecipesSearch_Struct* rss = (RecipesSearch_Struct*)app->pBuffer;
rss->query[55] = '\0'; //just to be sure.
LogDebug("Requested search recipes for: [{}] - [{}]\n", rss->object_type, rss->some_id);
// make where clause segment for container(s)
char containers[30];
uint32 combineObjectSlots;
if (rss->some_id == 0) {
// world combiner so no item number
snprintf(containers, 29, "= %u", rss->object_type);
combineObjectSlots = 10;
}
else {
// container in inventory
snprintf(containers, 29, "in (%u,%u)", rss->object_type, rss->some_id);
auto item = database.GetItem(rss->some_id);
if (!item)
{
LogError("Invalid container ID: [{}]. GetItem returned null. Defaulting to BagSlots = 10.\n", rss->some_id);
combineObjectSlots = 10;
}
else
{
combineObjectSlots = item->BagSlots;
}
}
std::string searchClause;
//omit the rlike clause if query is empty
if (rss->query[0] != 0) {
char buf[120]; //larger than 2X rss->query
database.DoEscapeString(buf, rss->query, strlen(rss->query));
searchClause = StringFormat("name rlike '%s' AND", buf);
}
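// same container and must_learn filtering as the favorites query, plus the name search and trivial range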
//arbitrary limit of 200 recipes, makes sense to me.
const std::string query = StringFormat("SELECT tr.id, tr.name, tr.trivial, "
"SUM(tre.componentcount), crl.madecount,tr.tradeskill "
"FROM tradeskill_recipe AS tr "
"LEFT JOIN tradeskill_recipe_entries AS tre ON tr.id = tre.recipe_id "
"LEFT JOIN (SELECT recipe_id, madecount "
"FROM char_recipe_list WHERE char_id = %u) AS crl ON tr.id=crl.recipe_id "
"WHERE %s tr.trivial >= %u AND tr.trivial <= %u AND tr.enabled <> 0 "
"AND tr.must_learn & 0x20 <> 0x20 "
"AND ((tr.must_learn & 0x3 <> 0 "
"AND crl.madecount IS NOT NULL) "
"OR (tr.must_learn & 0x3 = 0)) "
"GROUP BY tr.id "
"HAVING sum(if(tre.item_id %s AND tre.iscontainer > 0,1,0)) > 0 AND SUM(tre.componentcount) <= %u "
"LIMIT 200 ",
CharacterID(), searchClause.c_str(),
rss->mintrivial, rss->maxtrivial, containers, combineObjectSlots);
TradeskillSearchResults(query, rss->object_type, rss->some_id);
return;
}
void Client::Handle_OP_ReloadUI(const EQApplicationPacket *app)
{
if (IsInAGuild())
{
SendGuildRanks();
SendGuildMembers();
}
return;
}
void Client::Handle_OP_RemoveBlockedBuffs(const EQApplicationPacket *app)
{
if (!RuleB(Spells, EnableBlockedBuffs))
return;
if (app->size != sizeof(BlockedBuffs_Struct))
{
LogDebug("Size mismatch in OP_RemoveBlockedBuffs expected [{}] got [{}]", sizeof(BlockedBuffs_Struct), app->size);
DumpPacket(app);
return;
}
BlockedBuffs_Struct *bbs = (BlockedBuffs_Struct*)app->pBuffer;
std::set<uint32> *BlockedBuffs = bbs->Pet ? &PetBlockedBuffs : &PlayerBlockedBuffs;
std::set<uint32> RemovedBuffs;
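// the reply lists only the spell IDs that were actually found in, and removed from, the blocked-buff set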
if (bbs->Count > 0)
{
std::set<uint32>::iterator Iterator;
auto outapp = new EQApplicationPacket(OP_RemoveBlockedBuffs, sizeof(BlockedBuffs_Struct));
BlockedBuffs_Struct *obbs = (BlockedBuffs_Struct*)outapp->pBuffer;
for (unsigned int i = 0; i < BLOCKED_BUFF_COUNT; ++i)
obbs->SpellID[i] = 0;
obbs->Pet = bbs->Pet;
obbs->Initialise = 0;
obbs->Flags = 0x5a;
for (unsigned int i = 0; i < bbs->Count; ++i)
{
Iterator = BlockedBuffs->find(bbs->SpellID[i]);
if (Iterator != BlockedBuffs->end())
{
RemovedBuffs.insert(bbs->SpellID[i]);
BlockedBuffs->erase(Iterator);
}
}
obbs->Count = RemovedBuffs.size();
Iterator = RemovedBuffs.begin();
unsigned int Element = 0;
while (Iterator != RemovedBuffs.end())
{
obbs->SpellID[Element++] = (*Iterator);
++Iterator;
}
FastQueuePacket(&outapp);
}
}
void Client::Handle_OP_RemoveTrap(const EQApplicationPacket *app)
{
if (app->size != 4) {// just an int
LogDebug("Size mismatch in OP_RemoveTrap expected 4 got [{}]", app->size);
DumpPacket(app);
return;
}
auto id = app->ReadUInt32(0);
bool good = false;
for (int i = 0; i < trap_mgr.count; ++i) {
if (trap_mgr.auras[i].spawn_id == id) {
good = true;
break;
}
}
if (good)
RemoveAura(id);
else
MessageString(Chat::SpellFailure, NOT_YOUR_TRAP); // pretty sure this was red
}
void Client::Handle_OP_Report(const EQApplicationPacket *app)
{
if (!CanUseReport)
{
MessageString(Chat::System, REPORT_ONCE);
return;
}
uint32 size = app->size;
uint32 current_point = 0;
std::string reported, reporter;
std::string current_string;
int mode = 0;
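// packet layout: <reported name>|<reporter name>|<report text terminated by 0x00>; each '|' separator advances the parse mode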
while (current_point < size)
{
if (mode < 2)
{
if (app->pBuffer[current_point] == '|')
{
mode++;
}
else
{
if (mode == 0)
{
reported += app->pBuffer[current_point];
}
else
{
reporter += app->pBuffer[current_point];
}
}
current_point++;
}
else
{
if (app->pBuffer[current_point] == 0x0a)
{
current_string += '\n';
}
else if (app->pBuffer[current_point] == 0x00)
{
CanUseReport = false;
database.AddReport(reporter, reported, current_string);
return;
}
else
{
current_string += app->pBuffer[current_point];
}
current_point++;
}
}
CanUseReport = false;
database.AddReport(reporter, reported, current_string);
}
void Client::Handle_OP_RequestDuel(const EQApplicationPacket *app)
{
if (app->size != sizeof(Duel_Struct))
return;
EQApplicationPacket* outapp = app->Copy();
Duel_Struct* ds = (Duel_Struct*)outapp->pBuffer;
uint32 duel = ds->duel_initiator;
ds->duel_initiator = ds->duel_target;
ds->duel_target = duel;
Entity* entity = entity_list.GetID(ds->duel_target);
if (!entity) {
safe_delete(outapp);
return;
}
if (GetID() != ds->duel_target && entity->IsClient() && (entity->CastToClient()->IsDueling() && entity->CastToClient()->GetDuelTarget() != 0)) {
MessageString(Chat::NPCQuestSay, DUEL_CONSIDERING, entity->GetName());
return;
}
if (IsDueling()) {
MessageString(Chat::NPCQuestSay, DUEL_INPROGRESS);
return;
}
if (GetID() != ds->duel_target && entity->IsClient() && GetDuelTarget() == 0 && !IsDueling() && !entity->CastToClient()->IsDueling() && entity->CastToClient()->GetDuelTarget() == 0) {
SetDuelTarget(ds->duel_target);
entity->CastToClient()->SetDuelTarget(GetID());
ds->duel_target = ds->duel_initiator;
entity->CastToClient()->FastQueuePacket(&outapp);
entity->CastToClient()->SetDueling(false);
SetDueling(false);
}
else
safe_delete(outapp);
return;
}
void Client::Handle_OP_RequestTitles(const EQApplicationPacket *app)
{
EQApplicationPacket *outapp = title_manager.MakeTitlesPacket(this);
if (outapp != nullptr)
FastQueuePacket(&outapp);
}
void Client::Handle_OP_RespawnWindow(const EQApplicationPacket *app)
{
// This opcode is sent by the client when the player chooses which bind to return to.
// The client sends just a 4 byte packet with the selection number in it
//
if (app->size != 4)
{
LogDebug("Size mismatch in OP_RespawnWindow expected [{}] got [{}]", 4, app->size);
DumpPacket(app);
return;
}
char *Buffer = (char *)app->pBuffer;
uint32 Option = VARSTRUCT_DECODE_TYPE(uint32, Buffer);
HandleRespawnFromHover(Option);
}
void Client::Handle_OP_Rewind(const EQApplicationPacket *app)
{
if ((rewind_timer.GetRemainingTime() > 1 && rewind_timer.Enabled())) {
MessageString(Chat::System, REWIND_WAIT);
}
else {
CastToClient()->MovePC(zone->GetZoneID(), zone->GetInstanceID(), m_RewindLocation.x, m_RewindLocation.y, m_RewindLocation.z, 0, 2, Rewind);
rewind_timer.Start(30000, true);
}
}
void Client::Handle_OP_RezzAnswer(const EQApplicationPacket *app)
{
VERIFY_PACKET_LENGTH(OP_RezzAnswer, app, Resurrect_Struct);
const Resurrect_Struct* ra = (const Resurrect_Struct*)app->pBuffer;
LogSpells("Received OP_RezzAnswer from client. Pendingrezzexp is [{}], action is [{}]",
PendingRezzXP, ra->action ? "ACCEPT" : "DECLINE");
OPRezzAnswer(ra->action, ra->spellid, ra->zone_id, ra->instance_id, ra->x, ra->y, ra->z);
if (ra->action == 1)
{
EQApplicationPacket* outapp = app->Copy();
// Send the OP_RezzComplete to the world server. This finds its way to the zone that
// the rezzed corpse is in to mark the corpse as rezzed.
outapp->SetOpcode(OP_RezzComplete);
worldserver.RezzPlayer(outapp, 0, 0, OP_RezzComplete);
safe_delete(outapp);
}
return;
}
void Client::Handle_OP_Sacrifice(const EQApplicationPacket *app)
{
if (app->size != sizeof(Sacrifice_Struct)) {
LogDebug("Size mismatch in OP_Sacrifice expected [{}] got [{}]", sizeof(Sacrifice_Struct), app->size);
DumpPacket(app);
return;
}
Sacrifice_Struct *ss = (Sacrifice_Struct*)app->pBuffer;
if (!PendingSacrifice) {
LogError("Unexpected OP_Sacrifice reply");
DumpPacket(app);
return;
}
if (ss->Confirm) {
Client *Caster = entity_list.GetClientByName(SacrificeCaster.c_str());
if (Caster) Sacrifice(Caster);
}
PendingSacrifice = false;
SacrificeCaster.clear();
}
void Client::Handle_OP_SafeFallSuccess(const EQApplicationPacket *app) // bit of a misnomer, sent whenever safe fall is used (success or fail)
{
if (HasSkill(EQEmu::skills::SkillSafeFall)) //this should only get called if the client has safe fall, but just in case...
CheckIncreaseSkill(EQEmu::skills::SkillSafeFall, nullptr); //check for skill up
}
void Client::Handle_OP_SafePoint(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_Save(const EQApplicationPacket *app)
{
// The payload is 192 bytes - Not sure what is contained in payload
Save();
return;
}
void Client::Handle_OP_SaveOnZoneReq(const EQApplicationPacket *app)
{
Handle_OP_Save(app);
}
void Client::Handle_OP_SelectTribute(const EQApplicationPacket *app)
{
LogTribute("Received OP_SelectTribute of length [{}]", app->size);
//we should enforce being near a real tribute master to change this
//but I'm not sure how I want to do that right now.
if (app->size != sizeof(SelectTributeReq_Struct))
LogError("Invalid size on OP_SelectTribute packet");
else {
SelectTributeReq_Struct *t = (SelectTributeReq_Struct *)app->pBuffer;
SendTributeDetails(t->client_id, t->tribute_id);
}
return;
}
void Client::Handle_OP_SenseHeading(const EQApplicationPacket *app)
{
if (!HasSkill(EQEmu::skills::SkillSenseHeading))
return;
int chancemod = 0;
CheckIncreaseSkill(EQEmu::skills::SkillSenseHeading, nullptr, chancemod);
return;
}
void Client::Handle_OP_SenseTraps(const EQApplicationPacket *app)
{
if (!HasSkill(EQEmu::skills::SkillSenseTraps))
return;
if (!p_timers.Expired(&database, pTimerSenseTraps, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
int reuse = SenseTrapsReuseTime - GetSkillReuseTime(EQEmu::skills::SkillSenseTraps);
if (reuse < 1)
reuse = 1;
p_timers.Start(pTimerSenseTraps, reuse - 1);
float trap_curdist = 0;
Trap* trap = entity_list.FindNearbyTrap(this, 800, trap_curdist);
CheckIncreaseSkill(EQEmu::skills::SkillSenseTraps, nullptr);
if (trap && trap->skill > 0) {
int uskill = GetSkill(EQEmu::skills::SkillSenseTraps);
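// Opposed roll: the player's Sense Traps skill plus a d100 must beat the trap's skill (scaled to 75%) plus a d100.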
if ((zone->random.Int(0, 99) + uskill) >= (zone->random.Int(0, 99) + trap->skill*0.75))
{
auto diff = trap->m_Position - glm::vec3(GetPosition());
if (diff.x == 0 && diff.y == 0)
Message(Chat::Skills, "You sense a trap right under your feet!");
else if (diff.x > 10 && diff.y > 10)
Message(Chat::Skills, "You sense a trap to the NorthWest.");
else if (diff.x < -10 && diff.y > 10)
Message(Chat::Skills, "You sense a trap to the NorthEast.");
else if (diff.y > 10)
Message(Chat::Skills, "You sense a trap to the North.");
else if (diff.x > 10 && diff.y < -10)
Message(Chat::Skills, "You sense a trap to the SouthWest.");
else if (diff.x < -10 && diff.y < -10)
Message(Chat::Skills, "You sense a trap to the SouthEast.");
else if (diff.y < -10)
Message(Chat::Skills, "You sense a trap to the South.");
else if (diff.x > 10)
Message(Chat::Skills, "You sense a trap to the West.");
else
Message(Chat::Skills, "You sense a trap to the East.");
trap->detected = true;
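// Wrap negative headings into the 0-256 range, then double to the 0-512 scale MovePC expects;
// the in-place MovePC just turns the player to face the detected trap without moving them.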
float angle = CalculateHeadingToTarget(trap->m_Position.x, trap->m_Position.y);
if (angle < 0)
angle = (256 + angle);
angle *= 2;
MovePC(zone->GetZoneID(), zone->GetInstanceID(), GetX(), GetY(), GetZ(), angle);
return;
}
}
Message(Chat::Skills, "You did not find any traps nearby.");
return;
}
void Client::Handle_OP_SetGuildMOTD(const EQApplicationPacket *app)
{
LogGuilds("Received OP_SetGuildMOTD");
if (app->size != sizeof(GuildMOTD_Struct)) {
// client calls for a motd on login even if they aren't in a guild
LogError("Error: app size of [{}] != size of GuildMOTD_Struct of [{}]", app->size, sizeof(GuildMOTD_Struct));
return;
}
if (!IsInAGuild()) {
Message(Chat::Red, "You are not in a guild!");
return;
}
if (!guild_mgr.CheckPermission(GuildID(), GuildRank(), GUILD_MOTD)) {
Message(Chat::Red, "You do not have permissions to edit your guild's MOTD.");
return;
}
GuildMOTD_Struct* gmotd = (GuildMOTD_Struct*)app->pBuffer;
LogGuilds("Setting MOTD for [{}] ([{}]) to: [{}] - [{}]",
guild_mgr.GetGuildName(GuildID()), GuildID(), GetName(), gmotd->motd);
if (!guild_mgr.SetGuildMOTD(GuildID(), gmotd->motd, GetName())) {
Message(0, "Motd update failed.");
}
return;
}
void Client::Handle_OP_SetRunMode(const EQApplicationPacket *app)
{
if (app->size < sizeof(SetRunMode_Struct)) {
LogError("Received invalid sized OP_SetRunMode: got [{}], expected [{}]", app->size, sizeof(SetRunMode_Struct));
DumpPacket(app);
return;
}
SetRunMode_Struct* rms = (SetRunMode_Struct*)app->pBuffer;
if (rms->mode)
runmode = true;
else
runmode = false;
return;
}
void Client::Handle_OP_SetServerFilter(const EQApplicationPacket *app)
{
if (app->size != sizeof(SetServerFilter_Struct)) {
LogError("Received invalid sized OP_SetServerFilter: got [{}], expected [{}]", app->size, sizeof(SetServerFilter_Struct));
DumpPacket(app);
return;
}
SetServerFilter_Struct* filter = (SetServerFilter_Struct*)app->pBuffer;
ServerFilter(filter);
return;
}
void Client::Handle_OP_SetStartCity(const EQApplicationPacket *app)
{
// if the character has a start city, don't let them use the command
if (m_pp.binds[4].zoneId != 0 && m_pp.binds[4].zoneId != 189) {
Message(Chat::Yellow, "Your home city has already been set.", m_pp.binds[4].zoneId, database.GetZoneName(m_pp.binds[4].zoneId));
return;
}
if (app->size < 1) {
LogError("Wrong size: OP_SetStartCity, size=[{}], expected [{}]", app->size, 1);
DumpPacket(app);
return;
}
float x(0), y(0), z(0);
uint32 zoneid = 0;
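// The packet body is the zone id the player typed with /setstartcity, as ASCII text;
// validate it against the start_zones entries for this class, deity and race.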
uint32 startCity = (uint32)strtol((const char*)app->pBuffer, nullptr, 10);
std::string query = StringFormat("SELECT zone_id, bind_id, x, y, z FROM start_zones "
"WHERE player_class=%i AND player_deity=%i AND player_race=%i",
m_pp.class_, m_pp.deity, m_pp.race);
auto results = database.QueryDatabase(query);
if (!results.Success()) {
LogError("No valid start zones found for /setstartcity");
return;
}
bool validCity = false;
for (auto row = results.begin(); row != results.end(); ++row) {
if (atoi(row[1]) != 0)
zoneid = atoi(row[1]);
else
zoneid = atoi(row[0]);
if (zoneid != startCity)
continue;
validCity = true;
x = atof(row[2]);
y = atof(row[3]);
z = atof(row[4]);
}
if (validCity) {
Message(Chat::Yellow, "Your home city has been set");
SetStartZone(startCity, x, y, z);
return;
}
query = StringFormat("SELECT zone_id, bind_id FROM start_zones "
"WHERE player_class=%i AND player_deity=%i AND player_race=%i",
m_pp.class_, m_pp.deity, m_pp.race);
results = database.QueryDatabase(query);
if (!results.Success())
return;
Message(Chat::Yellow, "Use \"/setstartcity #\" to choose a home city from the following list:");
for (auto row = results.begin(); row != results.end(); ++row) {
if (atoi(row[1]) != 0)
zoneid = atoi(row[1]);
else
zoneid = atoi(row[0]);
char* name = nullptr;
database.GetZoneLongName(database.GetZoneName(zoneid), &name);
Message(Chat::Yellow, "%d - %s", zoneid, name);
}
}
void Client::Handle_OP_SetTitle(const EQApplicationPacket *app)
{
if (app->size != sizeof(SetTitle_Struct)) {
LogDebug("Size mismatch in OP_SetTitle expected [{}] got [{}]", sizeof(SetTitle_Struct), app->size);
DumpPacket(app);
return;
}
SetTitle_Struct *sts = (SetTitle_Struct *)app->pBuffer;
std::string Title;
if (!sts->is_suffix)
{
Title = title_manager.GetPrefix(sts->title_id);
SetAATitle(Title.c_str());
}
else
{
Title = title_manager.GetSuffix(sts->title_id);
SetTitleSuffix(Title.c_str());
}
}
void Client::Handle_OP_Shielding(const EQApplicationPacket *app)
{
if (app->size != sizeof(Shielding_Struct)) {
LogError("OP size error: OP_Shielding expected:[{}] got:[{}]", sizeof(Shielding_Struct), app->size);
return;
}
if (GetClass() != WARRIOR)
{
return;
}
if (shield_target)
{
entity_list.MessageCloseString(
this, false, 100, 0,
END_SHIELDING, GetName(), shield_target->GetName());
for (int y = 0; y < 2; y++)
{
if (shield_target->shielder[y].shielder_id == GetID())
{
shield_target->shielder[y].shielder_id = 0;
shield_target->shielder[y].shielder_bonus = 0;
}
}
}
Shielding_Struct* shield = (Shielding_Struct*)app->pBuffer;
shield_target = entity_list.GetMob(shield->target_id);
bool ack = false;
EQEmu::ItemInstance* inst = GetInv().GetItem(EQEmu::invslot::slotSecondary);
if (!shield_target)
return;
if (inst)
{
const EQEmu::ItemData* shield_item = inst->GetItem();
if (shield_item && shield_item->ItemType == EQEmu::item::ItemTypeShield)
{
for (int x = 0; x < 2; x++)
{
if (shield_target->shielder[x].shielder_id == 0)
{
entity_list.MessageCloseString(
this, false, 100, 0,
START_SHIELDING, GetName(), shield_target->GetName());
shield_target->shielder[x].shielder_id = GetID();
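// Base shield bonus is twice the shield's AC; ranks of the shielding AA scale it to 115%, 125% or 150%.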
int shieldbonus = shield_item->AC * 2;
switch (GetAA(197))
{
case 1:
shieldbonus = shieldbonus * 115 / 100;
break;
case 2:
shieldbonus = shieldbonus * 125 / 100;
break;
case 3:
shieldbonus = shieldbonus * 150 / 100;
break;
}
shield_target->shielder[x].shielder_bonus = shieldbonus;
shield_timer.Start();
ack = true;
break;
}
}
}
else
{
Message(0, "You must have a shield equipped to shield a target!");
shield_target = 0;
return;
}
}
else
{
Message(0, "You must have a shield equipped to shield a target!");
shield_target = 0;
return;
}
if (!ack)
{
MessageString(Chat::White, ALREADY_SHIELDED);
shield_target = 0;
return;
}
return;
}
void Client::Handle_OP_ShopEnd(const EQApplicationPacket *app)
{
EQApplicationPacket empty(OP_ShopEndConfirm);
QueuePacket(&empty);
return;
}
void Client::Handle_OP_ShopPlayerBuy(const EQApplicationPacket *app)
{
if (app->size != sizeof(Merchant_Sell_Struct)) {
LogError("Invalid size on OP_ShopPlayerBuy: Expected [{}], Got [{}]",
sizeof(Merchant_Sell_Struct), app->size);
return;
}
RDTSC_Timer t1;
t1.start();
Merchant_Sell_Struct* mp = (Merchant_Sell_Struct*)app->pBuffer;
#if EQDEBUG >= 5
LogDebug("[{}], purchase item", GetName());
DumpPacket(app);
#endif
int merchantid;
bool tmpmer_used = false;
Mob* tmp = entity_list.GetMob(mp->npcid);
if (tmp == 0 || !tmp->IsNPC() || tmp->GetClass() != MERCHANT)
return;
if (mp->quantity < 1) return;
//you have to be somewhat close to them to be properly using them
if (DistanceSquared(m_Position, tmp->GetPosition()) > USE_NPC_RANGE2)
return;
merchantid = tmp->CastToNPC()->MerchantType;
uint32 item_id = 0;
std::list<MerchantList> merlist = zone->merchanttable[merchantid];
std::list<MerchantList>::const_iterator itr;
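// Resolve the clicked merchant slot to an item id from the merchant's static list,
// skipping entries the player is too low level to buy.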
for (itr = merlist.begin(); itr != merlist.end(); ++itr) {
MerchantList ml = *itr;
if (GetLevel() < ml.level_required) {
continue;
}
if (mp->itemslot == ml.slot) {
item_id = ml.item;
break;
}
}
const EQEmu::ItemData* item = nullptr;
uint32 prevcharges = 0;
if (item_id == 0) { //check to see if its on the temporary table
std::list<TempMerchantList> tmp_merlist = zone->tmpmerchanttable[tmp->GetNPCTypeID()];
std::list<TempMerchantList>::const_iterator tmp_itr;
TempMerchantList ml;
for (tmp_itr = tmp_merlist.begin(); tmp_itr != tmp_merlist.end(); ++tmp_itr) {
ml = *tmp_itr;
if (mp->itemslot == ml.slot) {
item_id = ml.item;
tmpmer_used = true;
prevcharges = ml.charges;
break;
}
}
}
item = database.GetItem(item_id);
if (!item) {
//error finding item, client didn't get the update packet for whatever reason, roleplay a tad
Message(Chat::Yellow, "%s tells you 'Sorry, that item is for display purposes only.' as they take the item off the shelf.", tmp->GetCleanName());
auto delitempacket = new EQApplicationPacket(OP_ShopDelItem, sizeof(Merchant_DelItem_Struct));
Merchant_DelItem_Struct* delitem = (Merchant_DelItem_Struct*)delitempacket->pBuffer;
delitem->itemslot = mp->itemslot;
delitem->npcid = mp->npcid;
delitem->playerid = mp->playerid;
delitempacket->priority = 6;
entity_list.QueueCloseClients(tmp, delitempacket); //queue for anyone that could be using the merchant so they see the update
safe_delete(delitempacket);
return;
}
if (CheckLoreConflict(item))
{
Message(Chat::Yellow, "You can only have one of a lore item.");
return;
}
if (tmpmer_used && (mp->quantity > prevcharges || item->MaxCharges > 1))
{
if (prevcharges > item->MaxCharges && item->MaxCharges > 1)
mp->quantity = item->MaxCharges;
else
mp->quantity = prevcharges;
}
// Item's stackable, but the quantity they want to buy exceeds the max stackable quantity.
if (item->Stackable && mp->quantity > item->StackSize)
mp->quantity = item->StackSize;
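// Echo the (possibly clamped) purchase back to the buying client so the merchant window updates.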
auto outapp = new EQApplicationPacket(OP_ShopPlayerBuy, sizeof(Merchant_Sell_Struct));
Merchant_Sell_Struct* mpo = (Merchant_Sell_Struct*)outapp->pBuffer;
mpo->quantity = mp->quantity;
mpo->playerid = mp->playerid;
mpo->npcid = mp->npcid;
mpo->itemslot = mp->itemslot;
int16 freeslotid = INVALID_INDEX;
int16 charges = 0;
if (item->Stackable || item->MaxCharges > 1)
charges = mp->quantity;
else
charges = item->MaxCharges;
EQEmu::ItemInstance* inst = database.CreateItem(item, charges);
int SinglePrice = 0;
if (RuleB(Merchant, UsePriceMod))
SinglePrice = (item->Price * (RuleR(Merchant, SellCostMod)) * item->SellRate * Client::CalcPriceMod(tmp, false));
else
SinglePrice = (item->Price * (RuleR(Merchant, SellCostMod)) * item->SellRate);
if (item->MaxCharges > 1)
mpo->price = SinglePrice;
else
mpo->price = SinglePrice * mp->quantity;
if (mpo->price < 0)
{
safe_delete(outapp);
safe_delete(inst);
return;
}
// this area needs some work..two inventory insertion check failure points
// below do not return player's money..is this the intended behavior?
if (!TakeMoneyFromPP(mpo->price))
{
char *hacker_str = nullptr;
MakeAnyLenString(&hacker_str, "Vendor Cheat: attempted to buy %i of %i: %s that cost %d cp but only has %d pp %d gp %d sp %d cp\n",
mpo->quantity, item->ID, item->Name,
mpo->price, m_pp.platinum, m_pp.gold, m_pp.silver, m_pp.copper);
database.SetMQDetectionFlag(AccountName(), GetName(), hacker_str, zone->GetShortName());
safe_delete_array(hacker_str);
safe_delete(outapp);
safe_delete(inst);
return;
}
bool stacked = TryStacking(inst);
if (!stacked)
freeslotid = m_inv.FindFreeSlot(false, true, item->Size);
// shouldn't we be reimbursing if these two fail?
//make sure we are not completely full...
if (freeslotid == EQEmu::invslot::slotCursor) {
if (m_inv.GetItem(EQEmu::invslot::slotCursor) != nullptr) {
Message(Chat::Red, "You do not have room for any more items.");
safe_delete(outapp);
safe_delete(inst);
return;
}
}
if (!stacked && freeslotid == INVALID_INDEX)
{
Message(Chat::Red, "You do not have room for any more items.");
safe_delete(outapp);
safe_delete(inst);
return;
}
std::string packet;
if (!stacked && inst) {
PutItemInInventory(freeslotid, *inst);
SendItemPacket(freeslotid, inst, ItemPacketTrade);
}
else if (!stacked) {
LogError("OP_ShopPlayerBuy: item->ItemClass Unknown! Type: [{}]", item->ItemClass);
}
QueuePacket(outapp);
if (inst && tmpmer_used) {
int32 new_charges = prevcharges - mp->quantity;
zone->SaveTempItem(merchantid, tmp->GetNPCTypeID(), item_id, new_charges);
if (new_charges <= 0) {
auto delitempacket = new EQApplicationPacket(OP_ShopDelItem, sizeof(Merchant_DelItem_Struct));
Merchant_DelItem_Struct* delitem = (Merchant_DelItem_Struct*)delitempacket->pBuffer;
delitem->itemslot = mp->itemslot;
delitem->npcid = mp->npcid;
delitem->playerid = mp->playerid;
delitempacket->priority = 6;
entity_list.QueueClients(tmp, delitempacket); //queue for anyone that could be using the merchant so they see the update
safe_delete(delitempacket);
}
else {
// Update the charges/quantity in the merchant window
inst->SetCharges(new_charges);
inst->SetPrice(SinglePrice);
inst->SetMerchantSlot(mp->itemslot);
inst->SetMerchantCount(new_charges);
SendItemPacket(mp->itemslot, inst, ItemPacketMerchant);
}
}
safe_delete(inst);
safe_delete(outapp);
// start QS code
// stacking purchases not supported at this time - entire process will need some work to catch them properly
if (RuleB(QueryServ, PlayerLogMerchantTransactions)) {
auto qspack =
new ServerPacket(ServerOP_QSPlayerLogMerchantTransactions,
sizeof(QSMerchantLogTransaction_Struct) + sizeof(QSTransactionItems_Struct));
QSMerchantLogTransaction_Struct* qsaudit = (QSMerchantLogTransaction_Struct*)qspack->pBuffer;
qsaudit->zone_id = zone->GetZoneID();
qsaudit->merchant_id = tmp->CastToNPC()->MerchantType;
qsaudit->merchant_money.platinum = 0;
qsaudit->merchant_money.gold = 0;
qsaudit->merchant_money.silver = 0;
qsaudit->merchant_money.copper = 0;
qsaudit->merchant_count = 1;
qsaudit->char_id = character_id;
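// The price is in copper; split it into coin denominations (1 pp = 1000 cp, 1 gp = 100 cp, 1 sp = 10 cp) for the audit record.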
qsaudit->char_money.platinum = (mpo->price / 1000);
qsaudit->char_money.gold = (mpo->price / 100) % 10;
qsaudit->char_money.silver = (mpo->price / 10) % 10;
qsaudit->char_money.copper = mpo->price % 10;
qsaudit->char_count = 0;
qsaudit->items[0].char_slot = freeslotid == INVALID_INDEX ? 0 : freeslotid;
qsaudit->items[0].item_id = item->ID;
qsaudit->items[0].charges = mpo->quantity;
const EQEmu::ItemInstance* audit_inst = m_inv[freeslotid];
if (audit_inst) {
qsaudit->items[0].aug_1 = audit_inst->GetAugmentItemID(0);
qsaudit->items[0].aug_2 = audit_inst->GetAugmentItemID(1);
qsaudit->items[0].aug_3 = audit_inst->GetAugmentItemID(2);
qsaudit->items[0].aug_4 = audit_inst->GetAugmentItemID(3);
qsaudit->items[0].aug_5 = audit_inst->GetAugmentItemID(4);
}
else {
qsaudit->items[0].aug_1 = 0;
qsaudit->items[0].aug_2 = 0;
qsaudit->items[0].aug_3 = 0;
qsaudit->items[0].aug_4 = 0;
qsaudit->items[0].aug_5 = 0;
if (freeslotid != INVALID_INDEX) {
LogError("Handle_OP_ShopPlayerBuy: QS Audit could not locate merchant ([{}]) purchased item in player ([{}]) inventory slot ([{}])",
qsaudit->merchant_id, qsaudit->char_id, freeslotid);
}
}
audit_inst = nullptr;
if (worldserver.Connected()) { worldserver.SendPacket(qspack); }
safe_delete(qspack);
}
// end QS code
if (RuleB(EventLog, RecordBuyFromMerchant))
LogMerchant(this, tmp, mpo->quantity, mpo->price, item, true);
if ((RuleB(Character, EnableDiscoveredItems)))
{
if (!GetGM() && !IsDiscovered(item_id))
DiscoverItem(item_id);
}
t1.stop();
std::cout << "At 1: " << t1.getDuration() << std::endl;
return;
}
void Client::Handle_OP_ShopPlayerSell(const EQApplicationPacket *app)
{
if (app->size != sizeof(Merchant_Purchase_Struct)) {
LogError("Invalid size on OP_ShopPlayerSell: Expected [{}], Got [{}]",
sizeof(Merchant_Purchase_Struct), app->size);
return;
}
RDTSC_Timer t1(true);
Merchant_Purchase_Struct* mp = (Merchant_Purchase_Struct*)app->pBuffer;
Mob* vendor = entity_list.GetMob(mp->npcid);
if (vendor == 0 || !vendor->IsNPC() || vendor->GetClass() != MERCHANT)
return;
//you have to be somewhat close to them to be properly using them
if (DistanceSquared(m_Position, vendor->GetPosition()) > USE_NPC_RANGE2)
return;
uint32 price = 0;
uint32 itemid = GetItemIDAt(mp->itemslot);
if (itemid == 0)
return;
const EQEmu::ItemData* item = database.GetItem(itemid);
EQEmu::ItemInstance* inst = GetInv().GetItem(mp->itemslot);
if (!item || !inst) {
Message(Chat::Red, "You seemed to have misplaced that item..");
return;
}
if (mp->quantity > 1)
{
if ((inst->GetCharges() < 0) || (mp->quantity > (uint32)inst->GetCharges()))
return;
}
if (!item->NoDrop) {
//Message(Chat::Red,"%s tells you, 'LOL NOPE'", vendor->GetName());
return;
}
uint32 cost_quantity = mp->quantity;
// a charged (non-stackable) item sells as a single unit regardless of remaining charges
if (inst->IsCharged())
cost_quantity = 1;
uint32 i;
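// Price the sale one unit at a time; if the running total climbs past 4,000,000,000
// (guarding the uint32 limit) the quantity is capped at that point.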
if (RuleB(Merchant, UsePriceMod)) {
for (i = 1; i <= cost_quantity; i++) {
price = (uint32)((item->Price * i)*(RuleR(Merchant, BuyCostMod))*Client::CalcPriceMod(vendor, true) + 0.5); // need to round up, because client does it automatically when displaying price
if (price > 4000000000) {
cost_quantity = i;
mp->quantity = i;
break;
}
}
}
else {
for (i = 1; i <= cost_quantity; i++) {
price = (uint32)((item->Price * i)*(RuleR(Merchant, BuyCostMod)) + 0.5); // need to round up, because client does it automatically when displaying price
if (price > 4000000000) {
cost_quantity = i;
mp->quantity = i;
break;
}
}
}
AddMoneyToPP(price, false);
if (inst->IsStackable() || inst->IsCharged())
{
unsigned int i_quan = inst->GetCharges();
if (mp->quantity > i_quan || inst->IsCharged())
mp->quantity = i_quan;
}
else
mp->quantity = 1;
if (RuleB(EventLog, RecordSellToMerchant))
LogMerchant(this, vendor, mp->quantity, price, item, false);
int charges = mp->quantity;
//Hack workaround so usable items with 0 charges aren't simply deleted
if (charges == 0 && item->ItemType != 11 && item->ItemType != 17 && item->ItemType != 19 && item->ItemType != 21)
charges = 1;
int freeslot = 0;
if (charges > 0 && (freeslot = zone->SaveTempItem(vendor->CastToNPC()->MerchantType, vendor->GetNPCTypeID(), itemid, charges, true)) > 0) {
EQEmu::ItemInstance* inst2 = inst->Clone();
if (inst2 != nullptr) {
if (RuleB(Merchant, UsePriceMod)) {
inst2->SetPrice(item->Price*(RuleR(Merchant, SellCostMod))*item->SellRate*Client::CalcPriceMod(vendor, false));
}
else
inst2->SetPrice(item->Price*(RuleR(Merchant, SellCostMod))*item->SellRate);
inst2->SetMerchantSlot(freeslot);
uint32 MerchantQuantity = zone->GetTempMerchantQuantity(vendor->GetNPCTypeID(), freeslot);
if (inst2->IsStackable()) {
inst2->SetCharges(MerchantQuantity);
}
inst2->SetMerchantCount(MerchantQuantity);
SendItemPacket(freeslot - 1, inst2, ItemPacketMerchant);
safe_delete(inst2);
}
}
// start QS code
if (RuleB(QueryServ, PlayerLogMerchantTransactions)) {
auto qspack =
new ServerPacket(ServerOP_QSPlayerLogMerchantTransactions,
sizeof(QSMerchantLogTransaction_Struct) + sizeof(QSTransactionItems_Struct));
QSMerchantLogTransaction_Struct* qsaudit = (QSMerchantLogTransaction_Struct*)qspack->pBuffer;
qsaudit->zone_id = zone->GetZoneID();
qsaudit->merchant_id = vendor->CastToNPC()->MerchantType;
qsaudit->merchant_money.platinum = (price / 1000);
qsaudit->merchant_money.gold = (price / 100) % 10;
qsaudit->merchant_money.silver = (price / 10) % 10;
qsaudit->merchant_money.copper = price % 10;
qsaudit->merchant_count = 0;
qsaudit->char_id = character_id;
qsaudit->char_money.platinum = 0;
qsaudit->char_money.gold = 0;
qsaudit->char_money.silver = 0;
qsaudit->char_money.copper = 0;
qsaudit->char_count = 1;
qsaudit->items[0].char_slot = mp->itemslot;
qsaudit->items[0].item_id = itemid;
qsaudit->items[0].charges = charges;
qsaudit->items[0].aug_1 = m_inv[mp->itemslot]->GetAugmentItemID(0);
qsaudit->items[0].aug_2 = m_inv[mp->itemslot]->GetAugmentItemID(1);
qsaudit->items[0].aug_3 = m_inv[mp->itemslot]->GetAugmentItemID(2);
qsaudit->items[0].aug_4 = m_inv[mp->itemslot]->GetAugmentItemID(3);
qsaudit->items[0].aug_5 = m_inv[mp->itemslot]->GetAugmentItemID(4);
if (worldserver.Connected()) { worldserver.SendPacket(qspack); }
safe_delete(qspack);
}
// end QS code
// Now remove the item from the player, this happens regardless of outcome
if (!inst->IsStackable())
this->DeleteItemInInventory(mp->itemslot, 0, false);
else {
// HACK: DeleteItemInInventory uses int8 for quantity type. There is no consistent use of types in code in this path so for now iteratively delete from inventory.
if (mp->quantity > 255) {
uint32 temp = mp->quantity;
while (temp > 255 && temp != 0) {
// Delete chunks of 255
this->DeleteItemInInventory(mp->itemslot, 255, false);
temp -= 255;
}
if (temp != 0) {
// Delete remaining
this->DeleteItemInInventory(mp->itemslot, temp, false);
}
}
else {
this->DeleteItemInInventory(mp->itemslot, mp->quantity, false);
}
}
//This forces the price to show up correctly for charged items.
if (inst->IsCharged())
mp->quantity = 1;
auto outapp = new EQApplicationPacket(OP_ShopPlayerSell, sizeof(Merchant_Purchase_Struct));
Merchant_Purchase_Struct* mco = (Merchant_Purchase_Struct*)outapp->pBuffer;
mco->npcid = vendor->GetID();
mco->itemslot = mp->itemslot;
mco->quantity = mp->quantity;
mco->price = price;
QueuePacket(outapp);
safe_delete(outapp);
SendMoneyUpdate();
t1.start();
Save(1);
t1.stop();
std::cout << "Save took: " << t1.getDuration() << std::endl;
return;
}
void Client::Handle_OP_ShopRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(Merchant_Click_Struct)) {
LogError("Wrong size: OP_ShopRequest, size=[{}], expected [{}]", app->size, sizeof(Merchant_Click_Struct));
return;
}
Merchant_Click_Struct* mc = (Merchant_Click_Struct*)app->pBuffer;
// Send back opcode OP_ShopRequest - tells client to open merchant window.
//EQApplicationPacket* outapp = new EQApplicationPacket(OP_ShopRequest, sizeof(Merchant_Click_Struct));
//Merchant_Click_Struct* mco=(Merchant_Click_Struct*)outapp->pBuffer;
int merchantid = 0;
Mob* tmp = entity_list.GetMob(mc->npcid);
if (tmp == 0 || !tmp->IsNPC() || tmp->GetClass() != MERCHANT)
return;
//you have to be somewhat close to them to be properly using them
if (DistanceSquared(m_Position, tmp->GetPosition()) > USE_NPC_RANGE2)
return;
merchantid = tmp->CastToNPC()->MerchantType;
int action = 1;
if (merchantid == 0) {
auto outapp = new EQApplicationPacket(OP_ShopRequest, sizeof(Merchant_Click_Struct));
Merchant_Click_Struct* mco = (Merchant_Click_Struct*)outapp->pBuffer;
mco->npcid = mc->npcid;
mco->playerid = 0;
mco->command = 1; //open...
mco->rate = 1.0;
QueuePacket(outapp);
safe_delete(outapp);
return;
}
if (tmp->IsEngaged()) {
this->MessageString(Chat::White, MERCHANT_BUSY);
action = 0;
}
if (GetFeigned() || IsInvisible())
{
Message(0, "You cannot use a merchant right now.");
action = 0;
}
int primaryfaction = tmp->CastToNPC()->GetPrimaryFaction();
int factionlvl = GetFactionLevel(CharacterID(), tmp->CastToNPC()->GetNPCTypeID(), GetRace(), GetClass(), GetDeity(), primaryfaction, tmp);
if (factionlvl >= 7) {
MerchantRejectMessage(tmp, primaryfaction);
action = 0;
}
if (tmp->Charmed())
action = 0;
// 1199 I don't have time for that now. etc
if (!tmp->CastToNPC()->IsMerchantOpen()) {
tmp->SayString(zone->random.Int(1199, 1202));
action = 0;
}
auto outapp = new EQApplicationPacket(OP_ShopRequest, sizeof(Merchant_Click_Struct));
Merchant_Click_Struct* mco = (Merchant_Click_Struct*)outapp->pBuffer;
mco->npcid = mc->npcid;
mco->playerid = 0;
mco->command = action; // Merchant command 0x01 = open
if (RuleB(Merchant, UsePriceMod)) {
mco->rate = 1 / ((RuleR(Merchant, BuyCostMod))*Client::CalcPriceMod(tmp, true)); // works
}
else
mco->rate = 1 / (RuleR(Merchant, BuyCostMod));
outapp->priority = 6;
QueuePacket(outapp);
safe_delete(outapp);
if (action == 1)
BulkSendMerchantInventory(merchantid, tmp->GetNPCTypeID());
return;
}
void Client::Handle_OP_Sneak(const EQApplicationPacket *app)
{
if (!HasSkill(EQEmu::skills::SkillSneak) && GetSkill(EQEmu::skills::SkillSneak) == 0) {
return; //You cannot sneak if you do not have sneak
}
if (!p_timers.Expired(&database, pTimerSneak, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerSneak, SneakReuseTime - 1);
bool was = sneaking;
if (sneaking) {
sneaking = false;
hidden = false;
improved_hidden = false;
auto outapp = new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
SpawnAppearance_Struct* sa_out = (SpawnAppearance_Struct*)outapp->pBuffer;
sa_out->spawn_id = GetID();
sa_out->type = 0x03;
sa_out->parameter = 0;
entity_list.QueueClients(this, outapp, true);
safe_delete(outapp);
}
else {
CheckIncreaseSkill(EQEmu::skills::SkillSneak, nullptr, 5);
}
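// Chance to (re)enter sneak scales with skill: a 25% base plus skill/300, expressed as a percentage below.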
float hidechance = ((GetSkill(EQEmu::skills::SkillSneak) / 300.0f) + .25) * 100;
float random = zone->random.Real(0, 99);
if (!was && random < hidechance) {
sneaking = true;
}
auto outapp = new EQApplicationPacket(OP_SpawnAppearance, sizeof(SpawnAppearance_Struct));
SpawnAppearance_Struct* sa_out = (SpawnAppearance_Struct*)outapp->pBuffer;
sa_out->spawn_id = GetID();
sa_out->type = 0x0F;
sa_out->parameter = sneaking;
QueuePacket(outapp);
safe_delete(outapp);
if (GetClass() == ROGUE) {
outapp = new EQApplicationPacket(OP_SimpleMessage, 12);
SimpleMessage_Struct *msg = (SimpleMessage_Struct *)outapp->pBuffer;
msg->color = 0x010E;
if (sneaking) {
msg->string_id = 347;
}
else {
msg->string_id = 348;
}
FastQueuePacket(&outapp);
}
return;
}
void Client::Handle_OP_SpawnAppearance(const EQApplicationPacket *app)
{
if (app->size != sizeof(SpawnAppearance_Struct)) {
std::cout << "Wrong size on OP_SpawnAppearance. Got: " << app->size << ", Expected: " << sizeof(SpawnAppearance_Struct) << std::endl;
return;
}
SpawnAppearance_Struct* sa = (SpawnAppearance_Struct*)app->pBuffer;
if (sa->spawn_id != GetID())
return;
if (sa->type == AT_Invis) {
if (sa->parameter != 0)
{
if (!HasSkill(EQEmu::skills::SkillHide) && GetSkill(EQEmu::skills::SkillHide) == 0)
{
if (ClientVersion() < EQEmu::versions::ClientVersion::SoF)
{
char *hack_str = nullptr;
MakeAnyLenString(&hack_str, "Player sent OP_SpawnAppearance with AT_Invis: %i", sa->parameter);
database.SetMQDetectionFlag(this->account_name, this->name, hack_str, zone->GetShortName());
safe_delete_array(hack_str);
}
}
return;
}
invisible = false;
hidden = false;
improved_hidden = false;
entity_list.QueueClients(this, app, true);
return;
}
else if (sa->type == AT_Anim) {
if (IsAIControlled())
return;
if (sa->parameter == ANIM_STAND) {
SetAppearance(eaStanding);
playeraction = 0;
SetFeigned(false);
BindWound(this, false, true);
camp_timer.Disable();
}
else if (sa->parameter == ANIM_SIT) {
SetAppearance(eaSitting);
playeraction = 1;
if (!UseBardSpellLogic())
InterruptSpell();
SetFeigned(false);
BindWound(this, false, true);
tmSitting = Timer::GetCurrentTime();
BuffFadeBySitModifier();
}
else if (sa->parameter == ANIM_CROUCH) {
if (!UseBardSpellLogic())
InterruptSpell();
SetAppearance(eaCrouching);
playeraction = 2;
SetFeigned(false);
}
else if (sa->parameter == ANIM_DEATH) { // feign death too
SetAppearance(eaDead);
playeraction = 3;
InterruptSpell();
}
else if (sa->parameter == ANIM_LOOT) {
SetAppearance(eaLooting);
playeraction = 4;
SetFeigned(false);
}
else {
LogError("Client [{}] :: unknown appearance [{}]", name, (int)sa->parameter);
return;
}
entity_list.QueueClients(this, app, true);
}
else if (sa->type == AT_Anon) {
if (!anon_toggle_timer.Check()) {
return;
}
// For Anon/Roleplay
if (sa->parameter == 1) { // Anon
m_pp.anon = 1;
}
else if ((sa->parameter == 2) || (sa->parameter == 3)) { // This is Roleplay, or anon+rp
m_pp.anon = 2;
}
else if (sa->parameter == 0) { // This is Non-Anon
m_pp.anon = 0;
}
else {
LogError("Client [{}] :: unknown Anon/Roleplay Switch [{}]", name, (int)sa->parameter);
return;
}
entity_list.QueueClients(this, app, true);
UpdateWho();
}
else if ((sa->type == AT_HP) && (dead == 0)) {
return;
}
else if (sa->type == AT_AFK) {
if (afk_toggle_timer.Check()) {
AFK = (sa->parameter == 1);
entity_list.QueueClients(this, app, true);
}
}
else if (sa->type == AT_Split) {
m_pp.autosplit = (sa->parameter == 1);
}
else if (sa->type == AT_Sneak) {
if (sneaking == 0)
return;
if (sa->parameter != 0)
{
if (!HasSkill(EQEmu::skills::SkillSneak))
{
char *hack_str = nullptr;
MakeAnyLenString(&hack_str, "Player sent OP_SpawnAppearance with AT_Sneak: %i", sa->parameter);
database.SetMQDetectionFlag(this->account_name, this->name, hack_str, zone->GetShortName());
safe_delete_array(hack_str);
}
return;
}
sneaking = 0;
entity_list.QueueClients(this, app, true);
}
else if (sa->type == AT_Size)
{
char *hack_str = nullptr;
MakeAnyLenString(&hack_str, "Player sent OP_SpawnAppearance with AT_Size: %i", sa->parameter);
database.SetMQDetectionFlag(this->account_name, this->name, hack_str, zone->GetShortName());
safe_delete_array(hack_str);
}
else if (sa->type == AT_Light) // client emitting light (lightstone, shiny shield)
{
//don't do anything with this
}
else if (sa->type == AT_Levitate)
{
// don't do anything with this, we tell the client when it's
// levitating, not the other way around
}
else if (sa->type == AT_ShowHelm)
{
if (helm_toggle_timer.Check()) {
m_pp.showhelm = (sa->parameter == 1);
entity_list.QueueClients(this, app, true);
}
}
else if (sa->type == AT_GroupConsent)
{
m_pp.groupAutoconsent = (sa->parameter == 1);
ConsentCorpses("Group", (sa->parameter != 1));
}
else if (sa->type == AT_RaidConsent)
{
m_pp.raidAutoconsent = (sa->parameter == 1);
ConsentCorpses("Raid", (sa->parameter != 1));
}
else if (sa->type == AT_GuildConsent)
{
m_pp.guildAutoconsent = (sa->parameter == 1);
ConsentCorpses("Guild", (sa->parameter != 1));
}
else {
std::cout << "Unknown SpawnAppearance type: 0x" << std::hex << std::setw(4) << std::setfill('0') << sa->type << std::dec
<< " value: 0x" << std::hex << std::setw(8) << std::setfill('0') << sa->parameter << std::dec << std::endl;
}
return;
}
void Client::Handle_OP_Split(const EQApplicationPacket *app)
{
if (app->size != sizeof(Split_Struct)) {
LogError("Wrong size: OP_Split, size=[{}], expected [{}]", app->size, sizeof(Split_Struct));
return;
}
// The client removes the money on its own, but we have to
// update our state anyway, and make sure they had enough to begin
// with.
Split_Struct *split = (Split_Struct *)app->pBuffer;
//Per the note above, I'm not exactly sure what to do on error
//to notify the client of the error...
Group *group = nullptr;
Raid *raid = nullptr;
if (IsRaidGrouped())
raid = GetRaid();
else if (IsGrouped())
group = GetGroup();
// is there an actual error message for this?
if (raid == nullptr && group == nullptr) {
Message(Chat::Red, "You can not split money if you're not in a group.");
return;
}
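// Convert the requested split into total copper (1 sp = 10 cp, 1 gp = 100 cp, 1 pp = 1000 cp)
// and make sure the player can actually cover it before distributing.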
if (!TakeMoneyFromPP(static_cast<uint64>(split->copper) +
10 * static_cast<uint64>(split->silver) +
100 * static_cast<uint64>(split->gold) +
1000 * static_cast<uint64>(split->platinum))) {
Message(Chat::Red, "You do not have enough money to do that split.");
return;
}
if (raid)
raid->SplitMoney(raid->GetGroup(this), split->copper, split->silver, split->gold, split->platinum);
else if (group)
group->SplitMoney(split->copper, split->silver, split->gold, split->platinum);
return;
}
void Client::Handle_OP_Surname(const EQApplicationPacket *app)
{
if (app->size != sizeof(Surname_Struct))
{
LogDebug("Size mismatch in Surname expected [{}] got [{}]", sizeof(Surname_Struct), app->size);
return;
}
if (!p_timers.Expired(&database, pTimerSurnameChange, false) && !GetGM())
{
Message(Chat::Yellow, "You may only change surnames once every 7 days, your /surname is currently on cooldown.");
return;
}
if (GetLevel() < 20)
{
MessageString(Chat::Yellow, SURNAME_LEVEL);
return;
}
Surname_Struct* surname = (Surname_Struct*)app->pBuffer;
char *c = nullptr;
bool first = true;
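// Normalize the surname to title case (first letter upper, remainder lower) before length and filter checks.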
for (c = surname->lastname; *c; c++)
{
if (first)
{
*c = toupper(*c);
first = false;
}
else
{
*c = tolower(*c);
}
}
if (strlen(surname->lastname) >= 20) {
MessageString(Chat::Yellow, SURNAME_TOO_LONG);
return;
}
if (!database.CheckNameFilter(surname->lastname, true))
{
MessageString(Chat::Yellow, SURNAME_REJECTED);
return;
}
ChangeLastName(surname->lastname);
p_timers.Start(pTimerSurnameChange, 604800);
EQApplicationPacket* outapp = app->Copy();
surname = (Surname_Struct*)outapp->pBuffer;
surname->unknown0064 = 1;
FastQueuePacket(&outapp);
return;
}
void Client::Handle_OP_SwapSpell(const EQApplicationPacket *app)
{
if (app->size != sizeof(SwapSpell_Struct)) {
std::cout << "Wrong size on OP_SwapSpell. Got: " << app->size << ", Expected: " << sizeof(SwapSpell_Struct) << std::endl;
return;
}
const SwapSpell_Struct* swapspell = (const SwapSpell_Struct*)app->pBuffer;
int swapspelltemp;
const auto sbs = EQEmu::spells::DynamicLookup(ClientVersion(), GetGM())->SpellbookSize;
if (swapspell->from_slot < 0 || swapspell->from_slot >= sbs)
return;
if (swapspell->to_slot < 0 || swapspell->to_slot >= sbs)
return;
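// Swap the two spellbook slots in the profile and persist both affected entries;
// a swap out of an empty slot (negative spell id) is rejected below.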
swapspelltemp = m_pp.spell_book[swapspell->from_slot];
if (swapspelltemp < 0) {
return;
}
m_pp.spell_book[swapspell->from_slot] = m_pp.spell_book[swapspell->to_slot];
m_pp.spell_book[swapspell->to_slot] = swapspelltemp;
/* Save Spell Swaps */
if (!database.SaveCharacterSpell(this->CharacterID(), m_pp.spell_book[swapspell->from_slot], swapspell->from_slot)) {
database.DeleteCharacterSpell(this->CharacterID(), m_pp.spell_book[swapspell->from_slot], swapspell->from_slot);
}
if (!database.SaveCharacterSpell(this->CharacterID(), swapspelltemp, swapspell->to_slot)) {
database.DeleteCharacterSpell(this->CharacterID(), swapspelltemp, swapspell->to_slot);
}
QueuePacket(app);
return;
}
void Client::Handle_OP_TargetCommand(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClientTarget_Struct)) {
LogError("OP size error: OP_TargetMouse expected:[{}] got:[{}]", sizeof(ClientTarget_Struct), app->size);
return;
}
if (GetTarget())
{
GetTarget()->IsTargeted(-1);
}
// Locate and cache new target
ClientTarget_Struct* ct = (ClientTarget_Struct*)app->pBuffer;
pClientSideTarget = ct->new_target;
if (!IsAIControlled())
{
Mob *nt = entity_list.GetMob(ct->new_target);
if (nt)
{
SetTarget(nt);
bool inspect_buffs = false;
// rank 1 gives you ability to see NPC buffs in target window (SoD+)
if (nt->IsNPC()) {
if (IsRaidGrouped()) {
Raid *raid = GetRaid();
if (raid) {
uint32 gid = raid->GetGroup(this);
if (gid < 12 && raid->GroupCount(gid) > 2)
inspect_buffs = raid->GetLeadershipAA(groupAAInspectBuffs, gid);
}
}
else {
Group *group = GetGroup();
if (group && group->GroupCount() > 2)
inspect_buffs = group->GetLeadershipAA(groupAAInspectBuffs);
}
}
if (GetGM() || RuleB(Spells, AlwaysSendTargetsBuffs) || nt == this || inspect_buffs || (nt->IsClient() && !nt->CastToClient()->GetPVP()) ||
(nt->IsPet() && nt->GetOwner() && nt->GetOwner()->IsClient() && !nt->GetOwner()->CastToClient()->GetPVP()) ||
#ifdef BOTS
(nt->IsBot() && nt->GetOwner() && nt->GetOwner()->IsClient() && !nt->GetOwner()->CastToClient()->GetPVP()) || // TODO: bot pets
#endif
(nt->IsMerc() && nt->GetOwner() && nt->GetOwner()->IsClient() && !nt->GetOwner()->CastToClient()->GetPVP()))
{
nt->SendBuffsToClient(this);
}
}
else
{
SetTarget(nullptr);
SetHoTT(0);
UpdateXTargetType(TargetsTarget, nullptr);
Group *g = GetGroup();
if (g && g->HasRole(this, RoleAssist))
g->SetGroupAssistTarget(0);
if (g && g->HasRole(this, RoleTank))
g->SetGroupTankTarget(0);
if (g && g->HasRole(this, RolePuller))
g->SetGroupPullerTarget(0);
return;
}
}
else
{
SetTarget(nullptr);
SetHoTT(0);
UpdateXTargetType(TargetsTarget, nullptr);
return;
}
// HoTT
if (GetTarget() && GetTarget()->GetTarget())
{
SetHoTT(GetTarget()->GetTarget()->GetID());
UpdateXTargetType(TargetsTarget, GetTarget()->GetTarget());
}
else
{
SetHoTT(0);
UpdateXTargetType(TargetsTarget, nullptr);
}
Group *g = GetGroup();
if (g && g->HasRole(this, RoleAssist))
g->SetGroupAssistTarget(GetTarget());
if (g && g->HasRole(this, RoleTank))
g->SetGroupTankTarget(GetTarget());
if (g && g->HasRole(this, RolePuller))
g->SetGroupPullerTarget(GetTarget());
// For /target, send reject or success packet
if (app->GetOpcode() == OP_TargetCommand) {
if (GetTarget() && !GetTarget()->CastToMob()->IsInvisible(this) && (DistanceSquared(m_Position, GetTarget()->GetPosition()) <= TARGETING_RANGE*TARGETING_RANGE || GetGM())) {
if (GetTarget()->GetBodyType() == BT_NoTarget2 || GetTarget()->GetBodyType() == BT_Special
|| GetTarget()->GetBodyType() == BT_NoTarget)
{
//Targeting something we shouldn't with /target
//but the client allows this without MQ so you don't flag it
auto outapp = new EQApplicationPacket(OP_TargetReject, sizeof(TargetReject_Struct));
outapp->pBuffer[0] = 0x2f;
outapp->pBuffer[1] = 0x01;
outapp->pBuffer[4] = 0x0d;
if (GetTarget())
{
SetTarget(nullptr);
}
QueuePacket(outapp);
safe_delete(outapp);
return;
}
QueuePacket(app);
GetTarget()->IsTargeted(1);
SendHPUpdate();
}
else
{
auto outapp = new EQApplicationPacket(OP_TargetReject, sizeof(TargetReject_Struct));
outapp->pBuffer[0] = 0x2f;
outapp->pBuffer[1] = 0x01;
outapp->pBuffer[4] = 0x0d;
if (GetTarget())
{
SetTarget(nullptr);
}
QueuePacket(outapp);
safe_delete(outapp);
}
}
else
{
if (GetTarget())
{
if (GetGM())
{
GetTarget()->IsTargeted(1);
return;
}
else if (RuleB(Character, AllowMQTarget))
{
GetTarget()->IsTargeted(1);
return;
}
else if (GetTarget()->IsClient())
{
//make sure this client is in our raid/group
GetTarget()->IsTargeted(1);
return;
}
else if (GetTarget()->GetBodyType() == BT_NoTarget2 || GetTarget()->GetBodyType() == BT_Special
|| GetTarget()->GetBodyType() == BT_NoTarget)
{
char *hacker_str = nullptr;
MakeAnyLenString(&hacker_str, "%s attempting to target something untargetable, %s bodytype: %i\n",
GetName(), GetTarget()->GetName(), (int)GetTarget()->GetBodyType());
database.SetMQDetectionFlag(AccountName(), GetName(), hacker_str, zone->GetShortName());
safe_delete_array(hacker_str);
SetTarget((Mob*)nullptr);
return;
}
else if (IsXTarget(GetTarget()))
{
GetTarget()->IsTargeted(1);
return;
}
else if (GetTarget()->IsPetOwnerClient())
{
GetTarget()->IsTargeted(1);
return;
}
else if (GetBindSightTarget())
{
if (DistanceSquared(GetBindSightTarget()->GetPosition(), GetTarget()->GetPosition()) > (zone->newzone_data.maxclip*zone->newzone_data.maxclip))
{
if (DistanceSquared(m_Position, GetTarget()->GetPosition()) > (zone->newzone_data.maxclip*zone->newzone_data.maxclip))
{
char *hacker_str = nullptr;
MakeAnyLenString(&hacker_str, "%s attempting to target something beyond the clip plane of %.2f units,"
" from (%.2f, %.2f, %.2f) to %s (%.2f, %.2f, %.2f)", GetName(),
(zone->newzone_data.maxclip*zone->newzone_data.maxclip),
GetX(), GetY(), GetZ(), GetTarget()->GetName(), GetTarget()->GetX(), GetTarget()->GetY(), GetTarget()->GetZ());
database.SetMQDetectionFlag(AccountName(), GetName(), hacker_str, zone->GetShortName());
safe_delete_array(hacker_str);
SetTarget(nullptr);
return;
}
}
}
else if (DistanceSquared(m_Position, GetTarget()->GetPosition()) > (zone->newzone_data.maxclip*zone->newzone_data.maxclip))
{
char *hacker_str = nullptr;
MakeAnyLenString(&hacker_str, "%s attempting to target something beyond the clip plane of %.2f units,"
" from (%.2f, %.2f, %.2f) to %s (%.2f, %.2f, %.2f)", GetName(),
(zone->newzone_data.maxclip*zone->newzone_data.maxclip),
GetX(), GetY(), GetZ(), GetTarget()->GetName(), GetTarget()->GetX(), GetTarget()->GetY(), GetTarget()->GetZ());
database.SetMQDetectionFlag(AccountName(), GetName(), hacker_str, zone->GetShortName());
safe_delete_array(hacker_str);
SetTarget(nullptr);
return;
}
GetTarget()->IsTargeted(1);
}
}
return;
}
void Client::Handle_OP_TargetMouse(const EQApplicationPacket *app)
{
Handle_OP_TargetCommand(app);
}
void Client::Handle_OP_TaskHistoryRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(TaskHistoryRequest_Struct)) {
LogDebug("Size mismatch in OP_TaskHistoryRequest expected [{}] got [{}]", sizeof(TaskHistoryRequest_Struct), app->size);
DumpPacket(app);
return;
}
TaskHistoryRequest_Struct *ths = (TaskHistoryRequest_Struct*)app->pBuffer;
if (RuleB(TaskSystem, EnableTaskSystem) && taskstate)
taskstate->SendTaskHistory(this, ths->TaskIndex);
}
void Client::Handle_OP_Taunt(const EQApplicationPacket *app)
{
if (app->size != sizeof(ClientTarget_Struct)) {
std::cout << "Wrong size on OP_Taunt. Got: " << app->size << ", Expected: " << sizeof(ClientTarget_Struct) << std::endl;
return;
}
if (!p_timers.Expired(&database, pTimerTaunt, false)) {
Message(Chat::Red, "Ability recovery time not yet met.");
return;
}
p_timers.Start(pTimerTaunt, TauntReuseTime - 1);
if (GetTarget() == nullptr || !GetTarget()->IsNPC())
return;
if (!zone->CanDoCombat()) {
Message(Chat::Red, "You cannot taunt in a no combat zone.");
return;
}
Taunt(GetTarget()->CastToNPC(), false);
return;
}
void Client::Handle_OP_TestBuff(const EQApplicationPacket *app)
{
return;
}
void Client::Handle_OP_TGB(const EQApplicationPacket *app)
{
OPTGB(app);
return;
}
void Client::Handle_OP_Track(const EQApplicationPacket *app)
{
if (GetClass() != RANGER && GetClass() != DRUID && GetClass() != BARD)
return;
if (GetSkill(EQEmu::skills::SkillTracking) == 0)
SetSkill(EQEmu::skills::SkillTracking, 1);
else
CheckIncreaseSkill(EQEmu::skills::SkillTracking, nullptr, 15);
if (!entity_list.MakeTrackPacket(this))
LogError("Unable to generate OP_Track packet requested by client");
return;
}
void Client::Handle_OP_TrackTarget(const EQApplicationPacket *app)
{
int PlayerClass = GetClass();
if ((PlayerClass != RANGER) && (PlayerClass != DRUID) && (PlayerClass != BARD))
return;
if (app->size != sizeof(TrackTarget_Struct))
{
LogError("Invalid size for OP_TrackTarget: Expected: [{}], Got: [{}]",
sizeof(TrackTarget_Struct), app->size);
return;
}
TrackTarget_Struct *tts = (TrackTarget_Struct*)app->pBuffer;
TrackingID = tts->EntityID;
}
void Client::Handle_OP_TrackUnknown(const EQApplicationPacket *app)
{
// size 0, sent right after OP_Track
return;
}
void Client::Handle_OP_TradeAcceptClick(const EQApplicationPacket *app)
{
Mob* with = trade->With();
trade->state = TradeAccepted;
if (with && with->IsClient()) {
//finish trade...
// Have both accepted?
Client* other = with->CastToClient();
other->QueuePacket(app);
if (other->trade->state == trade->state) {
other->trade->state = TradeCompleting;
trade->state = TradeCompleting;
if (CheckTradeLoreConflict(other) || other->CheckTradeLoreConflict(this)) {
MessageString(Chat::Red, TRADE_CANCEL_LORE);
other->MessageString(Chat::Red, TRADE_CANCEL_LORE);
this->FinishTrade(this);
other->FinishTrade(other);
other->trade->Reset();
trade->Reset();
}
else if (CheckTradeNonDroppable()) {
MessageString(Chat::Red, TRADE_HAS_BEEN_CANCELLED);
other->MessageString(Chat::Red, TRADE_HAS_BEEN_CANCELLED);
this->FinishTrade(this);
other->FinishTrade(other);
other->trade->Reset();
trade->Reset();
Message(Chat::Yellow, "Hacking activity detected in trade transaction.");
// TODO: query (this) as a hacker
}
else if (other->CheckTradeNonDroppable()) {
MessageString(Chat::Red, TRADE_HAS_BEEN_CANCELLED);
other->MessageString(Chat::Red, TRADE_HAS_BEEN_CANCELLED);
this->FinishTrade(this);
other->FinishTrade(other);
other->trade->Reset();
trade->Reset();
other->Message(Chat::Yellow, "Hacking activity detected in trade transaction.");
// TODO: query (other) as a hacker
}
else {
// Audit trade to database for both trade streams
other->trade->LogTrade();
trade->LogTrade();
// start QS code
if (RuleB(QueryServ, PlayerLogTrades)) {
QSPlayerLogTrade_Struct event_entry;
std::list<void*> event_details;
memset(&event_entry, 0, sizeof(QSPlayerLogTrade_Struct));
// Perform actual trade
this->FinishTrade(other, true, &event_entry, &event_details);
other->FinishTrade(this, false, &event_entry, &event_details);
event_entry._detail_count = event_details.size();
auto qs_pack = new ServerPacket(
ServerOP_QSPlayerLogTrades,
sizeof(QSPlayerLogTrade_Struct) +
(sizeof(QSTradeItems_Struct) * event_entry._detail_count));
QSPlayerLogTrade_Struct* qs_buf = (QSPlayerLogTrade_Struct*)qs_pack->pBuffer;
memcpy(qs_buf, &event_entry, sizeof(QSPlayerLogTrade_Struct));
int offset = 0;
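// Copy each heap-allocated trade detail into the QS packet body and free the temporary entry.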
for (auto iter = event_details.begin(); iter != event_details.end();
++iter, ++offset) {
QSTradeItems_Struct* detail = reinterpret_cast<QSTradeItems_Struct*>(*iter);
qs_buf->items[offset] = *detail;
safe_delete(detail);
}
event_details.clear();
if (worldserver.Connected())
worldserver.SendPacket(qs_pack);
safe_delete(qs_pack);
// end QS code
}
else {
this->FinishTrade(other);
other->FinishTrade(this);
}
other->trade->Reset();
trade->Reset();
}
// All done
auto outapp = new EQApplicationPacket(OP_FinishTrade, 0);
other->QueuePacket(outapp);
this->FastQueuePacket(&outapp);
}
}
// Trading with a Mob object that is not a Client.
else if (with) {
auto outapp = new EQApplicationPacket(OP_FinishTrade, 0);
QueuePacket(outapp);
safe_delete(outapp);
if (with->IsNPC()) {
// Audit trade to database for player trade stream
if (RuleB(QueryServ, PlayerLogHandins)) {
QSPlayerLogHandin_Struct event_entry;
std::list<void*> event_details;
memset(&event_entry, 0, sizeof(QSPlayerLogHandin_Struct));
FinishTrade(with->CastToNPC(), false, &event_entry, &event_details);
event_entry._detail_count = event_details.size();
auto qs_pack =
new ServerPacket(ServerOP_QSPlayerLogHandins,
sizeof(QSPlayerLogHandin_Struct) +
(sizeof(QSHandinItems_Struct) * event_entry._detail_count));
QSPlayerLogHandin_Struct* qs_buf = (QSPlayerLogHandin_Struct*)qs_pack->pBuffer;
memcpy(qs_buf, &event_entry, sizeof(QSPlayerLogHandin_Struct));
int offset = 0;
for (auto iter = event_details.begin(); iter != event_details.end(); ++iter, ++offset) {
QSHandinItems_Struct* detail = reinterpret_cast<QSHandinItems_Struct*>(*iter);
qs_buf->items[offset] = *detail;
safe_delete(detail);
}
event_details.clear();
if (worldserver.Connected())
worldserver.SendPacket(qs_pack);
safe_delete(qs_pack);
}
else {
FinishTrade(with->CastToNPC());
}
}
#ifdef BOTS
// TODO: Log Bot trades
else if (with->IsBot())
with->CastToBot()->FinishTrade(this, Bot::BotTradeClientNormal);
#endif
trade->Reset();
}
return;
}
void Client::Handle_OP_TradeBusy(const EQApplicationPacket *app)
{
if (app->size != sizeof(TradeBusy_Struct)) {
LogError("Wrong size: OP_TradeBusy, size=[{}], expected [{}]", app->size, sizeof(TradeBusy_Struct));
return;
}
// Trade request recipient is cancelling the trade due to being busy
// Trade requester gets message "I'm busy right now"
// Send busy message on to trade initiator if client
TradeBusy_Struct* msg = (TradeBusy_Struct*)app->pBuffer;
Mob* tradee = entity_list.GetMob(msg->to_mob_id);
if (tradee && tradee->IsClient()) {
tradee->CastToClient()->QueuePacket(app);
}
return;
}
void Client::Handle_OP_Trader(const EQApplicationPacket *app)
{
// Bazaar Trader:
//
// SoF sends 1 or more unhandled OP_Trader packets of size 96 when a trade has completed.
// I don't know what they are for (yet), but it doesn't seem to matter that we ignore them.
uint32 max_items = 80;
/*
if (GetClientVersion() >= EQClientRoF)
max_items = 200;
*/
//Show Items
if (app->size == sizeof(Trader_ShowItems_Struct))
{
Trader_ShowItems_Struct* sis = (Trader_ShowItems_Struct*)app->pBuffer;
switch (sis->Code)
{
case BazaarTrader_EndTraderMode: {
Trader_EndTrader();
LogTrading("Client::Handle_OP_Trader: End Trader Session");
break;
}
case BazaarTrader_EndTransaction: {
Client* c = entity_list.GetClientByID(sis->TraderID);
if (c)
{
c->WithCustomer(0);
LogTrading("Client::Handle_OP_Trader: End Transaction");
}
else
LogTrading("Client::Handle_OP_Trader: Null Client Pointer");
break;
}
case BazaarTrader_ShowItems: {
Trader_ShowItems();
LogTrading("Client::Handle_OP_Trader: Show Trader Items");
break;
}
default: {
LogTrading("Unhandled action code in OP_Trader ShowItems_Struct");
break;
}
}
}
else if (app->size == sizeof(ClickTrader_Struct))
{
if (Buyer) {
Trader_EndTrader();
Message(Chat::Red, "You cannot be a Trader and Buyer at the same time.");
return;
}
ClickTrader_Struct* ints = (ClickTrader_Struct*)app->pBuffer;
if (ints->Code == BazaarTrader_StartTraderMode)
{
GetItems_Struct* gis = GetTraderItems();
LogTrading("Client::Handle_OP_Trader: Start Trader Mode");
// Verify there are no NODROP or items with a zero price
bool TradeItemsValid = true;
for (uint32 i = 0; i < max_items; i++) {
if (gis->Items[i] == 0) break;
if (ints->ItemCost[i] == 0) {
Message(Chat::Red, "Item in Trader Satchel with no price. Unable to start trader mode");
TradeItemsValid = false;
break;
}
const EQEmu::ItemData *Item = database.GetItem(gis->Items[i]);
if (!Item) {
Message(Chat::Red, "Unexpected error. Unable to start trader mode");
TradeItemsValid = false;
break;
}
if (Item->NoDrop == 0) {
Message(Chat::Red, "NODROP Item in Trader Satchel. Unable to start trader mode");
TradeItemsValid = false;
break;
}
}
if (!TradeItemsValid) {
Trader_EndTrader();
return;
}
for (uint32 i = 0; i < max_items; i++) {
if (database.GetItem(gis->Items[i])) {
database.SaveTraderItem(this->CharacterID(), gis->Items[i], gis->SerialNumber[i],
gis->Charges[i], ints->ItemCost[i], i);
auto inst = FindTraderItemBySerialNumber(gis->SerialNumber[i]);
if (inst)
inst->SetPrice(ints->ItemCost[i]);
}
else {
//return; //sony doesn't memset so assume done on first bad item
break;
}
}
safe_delete(gis);
this->Trader_StartTrader();
// This refreshes the Trader window to display the End Trader button
if (ClientVersion() >= EQEmu::versions::ClientVersion::RoF)
{
auto outapp = new EQApplicationPacket(OP_Trader, sizeof(TraderStatus_Struct));
TraderStatus_Struct* tss = (TraderStatus_Struct*)outapp->pBuffer;
tss->Code = BazaarTrader_StartTraderMode2;
QueuePacket(outapp);
safe_delete(outapp);
}
}
else {
LogTrading("Client::Handle_OP_Trader: Unknown TraderStruct code of: [{}]\n",
ints->Code);
LogError("Unknown TraderStruct code of: [{}]\n", ints->Code);
}
}
else if (app->size == sizeof(TraderStatus_Struct))
{
TraderStatus_Struct* tss = (TraderStatus_Struct*)app->pBuffer;
LogTrading("Client::Handle_OP_Trader: Trader Status Code: [{}]", tss->Code);
switch (tss->Code)
{
case BazaarTrader_EndTraderMode: {
Trader_EndTrader();
LogTrading("Client::Handle_OP_Trader: End Trader Session");
break;
}
case BazaarTrader_ShowItems: {
Trader_ShowItems();
LogTrading("Client::Handle_OP_Trader: Show Trader Items");
break;
}
default: {
LogTrading("Unhandled action code in OP_Trader ShowItems_Struct");
break;
}
}
}
else if (app->size == sizeof(TraderPriceUpdate_Struct))
{
LogTrading("Client::Handle_OP_Trader: Trader Price Update");
HandleTraderPriceUpdate(app);
}
else {
LogTrading("Unknown size for OP_Trader: [{}]\n", app->size);
LogError("Unknown size for OP_Trader: [{}]\n", app->size);
DumpPacket(app);
return;
}
return;
}
void Client::Handle_OP_TraderBuy(const EQApplicationPacket *app)
{
// Bazaar Trader:
//
// Client has elected to buy an item from a Trader
//
if (app->size != sizeof(TraderBuy_Struct)) {
LogError("Wrong size: OP_TraderBuy, size=[{}], expected [{}]", app->size, sizeof(TraderBuy_Struct));
return;
}
TraderBuy_Struct* tbs = (TraderBuy_Struct*)app->pBuffer;
if (Client* Trader = entity_list.GetClientByID(tbs->TraderID)) {
BuyTraderItem(tbs, Trader, app);
LogTrading("Client::Handle_OP_TraderBuy: Buy Trader Item ");
}
else {
LogTrading("Client::Handle_OP_TraderBuy: Null Client Pointer");
}
return;
}
void Client::Handle_OP_TradeRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(TradeRequest_Struct)) {
LogError("Wrong size: OP_TradeRequest, size=[{}], expected [{}]", app->size, sizeof(TradeRequest_Struct));
return;
}
// Client requesting a trade session from an npc/client
// Trade session not started until OP_TradeRequestAck is sent
CommonBreakInvisible();
// Pass trade request on to recipient
TradeRequest_Struct* msg = (TradeRequest_Struct*)app->pBuffer;
Mob* tradee = entity_list.GetMob(msg->to_mob_id);
if (tradee && tradee->IsClient()) {
tradee->CastToClient()->QueuePacket(app);
}
#ifndef BOTS
else if (tradee && tradee->IsNPC()) {
#else
else if (tradee && (tradee->IsNPC() || tradee->IsBot())) {
#endif
if (!tradee->IsEngaged()) {
trade->Start(msg->to_mob_id);
EQApplicationPacket *outapp = new EQApplicationPacket(OP_TradeRequestAck, sizeof(TradeRequest_Struct));
TradeRequest_Struct *acc = (TradeRequest_Struct *) outapp->pBuffer;
acc->from_mob_id = msg->to_mob_id;
acc->to_mob_id = msg->from_mob_id;
FastQueuePacket(&outapp);
safe_delete(outapp);
}
}
return;
}
void Client::Handle_OP_TradeRequestAck(const EQApplicationPacket *app)
{
if (app->size != sizeof(TradeRequest_Struct)) {
LogError("Wrong size: OP_TradeRequestAck, size=[{}], expected [{}]", app->size, sizeof(TradeRequest_Struct));
return;
}
// Trade request recipient is acknowledging they are able to trade
// After this, the trade session has officially started
// Send ack on to trade initiator if client
TradeRequest_Struct* msg = (TradeRequest_Struct*)app->pBuffer;
Mob* tradee = entity_list.GetMob(msg->to_mob_id);
if (tradee && tradee->IsClient()) {
trade->Start(msg->to_mob_id);
tradee->CastToClient()->QueuePacket(app);
}
return;
}
void Client::Handle_OP_TraderShop(const EQApplicationPacket *app)
{
// Bazaar Trader:
if (app->size == sizeof(TraderClick_Struct))
{
TraderClick_Struct* tcs = (TraderClick_Struct*)app->pBuffer;
LogTrading("Handle_OP_TraderShop: TraderClick_Struct TraderID [{}], Code [{}], Unknown008 [{}], Approval [{}]",
tcs->TraderID, tcs->Code, tcs->Unknown008, tcs->Approval);
if (tcs->Code == BazaarWelcome)
{
LogTrading("Client::Handle_OP_TraderShop: Sent Bazaar Welcome Info");
SendBazaarWelcome();
}
else
{
// This is when a potential purchaser right clicks on this client who is in Trader mode to
// browse their goods.
auto outapp = new EQApplicationPacket(OP_TraderShop, sizeof(TraderClick_Struct));
TraderClick_Struct* outtcs = (TraderClick_Struct*)outapp->pBuffer;
Client* Trader = entity_list.GetClientByID(tcs->TraderID);
if (Trader)
{
outtcs->Approval = Trader->WithCustomer(GetID());
LogTrading("Client::Handle_OP_TraderShop: Shop Request ([{}]) to ([{}]) with Approval: [{}]", GetCleanName(), Trader->GetCleanName(), outtcs->Approval);
}
else {
LogTrading("Client::Handle_OP_TraderShop: entity_list.GetClientByID(tcs->traderid)"
" returned a nullptr pointer");
safe_delete(outapp);
return;
}
outtcs->TraderID = tcs->TraderID;
outtcs->Unknown008 = 0x3f800000;
QueuePacket(outapp);
if (outtcs->Approval) {
this->BulkSendTraderInventory(Trader->CharacterID());
Trader->Trader_CustomerBrowsing(this);
TraderID = tcs->TraderID;
LogTrading("Client::Handle_OP_TraderShop: Trader Inventory Sent");
}
else
{
MessageString(Chat::Yellow, TRADER_BUSY);
LogTrading("Client::Handle_OP_TraderShop: Trader Busy");
}
safe_delete(outapp);
return;
}
}
else if (app->size == sizeof(BazaarWelcome_Struct))
{
// RoF+
// Client requested Bazaar Welcome Info (Trader and Item Total Counts)
SendBazaarWelcome();
LogTrading("Client::Handle_OP_TraderShop: Sent Bazaar Welcome Info");
}
else if (app->size == sizeof(TraderBuy_Struct))
{
// RoF+
// Customer has purchased an item from the Trader
TraderBuy_Struct* tbs = (TraderBuy_Struct*)app->pBuffer;
if (Client* Trader = entity_list.GetClientByID(tbs->TraderID))
{
BuyTraderItem(tbs, Trader, app);
LogTrading("Handle_OP_TraderShop: Buy Action [{}], Price [{}], Trader [{}], ItemID [{}], Quantity [{}], ItemName, [{}]",
tbs->Action, tbs->Price, tbs->TraderID, tbs->ItemID, tbs->Quantity, tbs->ItemName);
}
else
{
LogTrading("OP_TraderShop: Null Client Pointer");
}
}
else if (app->size == 4)
{
// RoF+
// Customer has closed the trade window
uint32 Command = *((uint32 *)app->pBuffer);
if (Command == 4)
{
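			// Clear our link to the trader and let them know we are no longer browsing their goods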
Client* c = entity_list.GetClientByID(TraderID);
TraderID = 0;
if (c)
{
c->WithCustomer(0);
LogTrading("Client::Handle_OP_Trader: End Transaction - Code [{}]", Command);
}
else
{
LogTrading("Client::Handle_OP_Trader: Null Client Pointer for Trader - Code [{}]", Command);
}
EQApplicationPacket empty(OP_ShopEndConfirm);
QueuePacket(&empty);
}
else
{
LogTrading("Client::Handle_OP_Trader: Unhandled Code [{}]", Command);
}
}
else
{
LogTrading("Unknown size for OP_TraderShop: [{}]\n", app->size);
LogError("Unknown size for OP_TraderShop: [{}]\n", app->size);
DumpPacket(app);
return;
}
}
void Client::Handle_OP_TradeSkillCombine(const EQApplicationPacket *app)
{
if (app->size != sizeof(NewCombine_Struct)) {
LogError("Invalid size for NewCombine_Struct: Expected: [{}], Got: [{}]",
sizeof(NewCombine_Struct), app->size);
return;
}
/*if (m_tradeskill_object == nullptr) {
Message(Chat::Red, "Error: Server is not aware of the tradeskill container you are attempting to use");
return;
}*/
//fixed this to work for non-world objects
// Delegate to tradeskill object to perform combine
NewCombine_Struct* in_combine = (NewCombine_Struct*)app->pBuffer;
Object::HandleCombine(this, in_combine, m_tradeskill_object);
return;
}
void Client::Handle_OP_Translocate(const EQApplicationPacket *app)
{
if (app->size != sizeof(Translocate_Struct)) {
LogDebug("Size mismatch in OP_Translocate expected [{}] got [{}]", sizeof(Translocate_Struct), app->size);
DumpPacket(app);
return;
}
Translocate_Struct *its = (Translocate_Struct*)app->pBuffer;
if (!PendingTranslocate)
return;
if ((RuleI(Spells, TranslocateTimeLimit) > 0) && (time(nullptr) > (TranslocateTime + RuleI(Spells, TranslocateTimeLimit)))) {
Message(Chat::Red, "You did not accept the Translocate within the required time limit.");
PendingTranslocate = false;
return;
}
if (its->Complete == 1) {
int SpellID = PendingTranslocateData.spell_id;
int i = parse->EventSpell(EVENT_SPELL_EFFECT_TRANSLOCATE_COMPLETE, nullptr, this, SpellID, 0);
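		// A non-zero return from the quest event cancels the default translocate handling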
if (i == 0)
{
// If the spell has a translocate to bind effect, AND we are already in the zone the client
// is bound in, use the GoToBind method. If we send OP_Translocate in this case, the client moves itself
// to the bind coords it has from the PlayerProfile, but with the X and Y reversed. I suspect they are
// reversed in the pp, and since spells like Gate are handled serverside, this has not mattered before.
if (((SpellID == 1422) || (SpellID == 1334) || (SpellID == 3243)) &&
(zone->GetZoneID() == PendingTranslocateData.zone_id &&
zone->GetInstanceID() == PendingTranslocateData.instance_id))
{
PendingTranslocate = false;
GoToBind();
return;
}
			//// Was sending the packet back to initiate client zoning...
			//// but that could be abusable, so let's go through proper channels
MovePC(PendingTranslocateData.zone_id, PendingTranslocateData.instance_id,
PendingTranslocateData.x, PendingTranslocateData.y,
PendingTranslocateData.z, PendingTranslocateData.heading, 0, ZoneSolicited);
}
}
PendingTranslocate = false;
}
void Client::Handle_OP_TributeItem(const EQApplicationPacket *app)
{
LogTribute("Received OP_TributeItem of length [{}]", app->size);
//player donates an item...
if (app->size != sizeof(TributeItem_Struct))
printf("Error in OP_TributeItem. Expected size of: %zu, but got: %i\n", sizeof(StartTribute_Struct), app->size);
else {
TributeItem_Struct* t = (TributeItem_Struct*)app->pBuffer;
tribute_master_id = t->tribute_master_id;
//make sure they are dealing with a valid tribute master
Mob* tribmast = entity_list.GetMob(t->tribute_master_id);
if (!tribmast || !tribmast->IsNPC() || tribmast->GetClass() != TRIBUTE_MASTER)
return;
if (DistanceSquared(m_Position, tribmast->GetPosition()) > USE_NPC_RANGE2)
return;
t->tribute_points = TributeItem(t->slot, t->quantity);
LogTribute("Sending tribute item reply with [{}] points", t->tribute_points);
QueuePacket(app);
}
return;
}
void Client::Handle_OP_TributeMoney(const EQApplicationPacket *app)
{
LogTribute("Received OP_TributeMoney of length [{}]", app->size);
//player donates money
if (app->size != sizeof(TributeMoney_Struct))
printf("Error in OP_TributeMoney. Expected size of: %zu, but got: %i\n", sizeof(StartTribute_Struct), app->size);
else {
TributeMoney_Struct* t = (TributeMoney_Struct*)app->pBuffer;
tribute_master_id = t->tribute_master_id;
//make sure they are dealing with a valid tribute master
Mob* tribmast = entity_list.GetMob(t->tribute_master_id);
if (!tribmast || !tribmast->IsNPC() || tribmast->GetClass() != TRIBUTE_MASTER)
return;
if (DistanceSquared(m_Position, tribmast->GetPosition()) > USE_NPC_RANGE2)
return;
t->tribute_points = TributeMoney(t->platinum);
LogTribute("Sending tribute money reply with [{}] points", t->tribute_points);
QueuePacket(app);
}
return;
}
void Client::Handle_OP_TributeNPC(const EQApplicationPacket *app)
{
LogTribute("Received OP_TributeNPC of length [{}]", app->size);
return;
}
void Client::Handle_OP_TributeToggle(const EQApplicationPacket *app)
{
LogTribute("Received OP_TributeToggle of length [{}]", app->size);
if (app->size != sizeof(uint32))
LogError("Invalid size on OP_TributeToggle packet");
else {
uint32 *val = (uint32 *)app->pBuffer;
ToggleTribute(*val ? true : false);
}
return;
}
void Client::Handle_OP_TributeUpdate(const EQApplicationPacket *app)
{
LogTribute("Received OP_TributeUpdate of length [{}]", app->size);
//sent when the client changes their tribute settings...
if (app->size != sizeof(TributeInfo_Struct))
LogError("Invalid size on OP_TributeUpdate packet");
else {
TributeInfo_Struct *t = (TributeInfo_Struct *)app->pBuffer;
ChangeTributeSettings(t);
}
return;
}
void Client::Handle_OP_VetClaimRequest(const EQApplicationPacket *app)
{
if (app->size < sizeof(VeteranClaim)) {
LogDebug("OP_VetClaimRequest size lower than expected: got [{}] expected at least [{}]", app->size, sizeof(VeteranClaim));
DumpPacket(app);
return;
}
VeteranClaim *vcr = (VeteranClaim *)app->pBuffer;
if (vcr->claim_id == 0xFFFFFFFF) { // request update packet
SendRewards();
return;
}
// try to claim something!
auto vetapp = new EQApplicationPacket(OP_VetClaimReply, sizeof(VeteranClaim));
VeteranClaim *cr = (VeteranClaim *)vetapp->pBuffer;
strcpy(cr->name, GetName());
cr->claim_id = vcr->claim_id;
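	// action 0 = claim succeeded, 1 = claim failed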
if (!TryReward(vcr->claim_id))
cr->action = 1;
else
cr->action = 0;
FastQueuePacket(&vetapp);
}
void Client::Handle_OP_VoiceMacroIn(const EQApplicationPacket *app)
{
if (app->size != sizeof(VoiceMacroIn_Struct)) {
LogDebug("Size mismatch in OP_VoiceMacroIn expected [{}] got [{}]", sizeof(VoiceMacroIn_Struct), app->size);
DumpPacket(app);
return;
}
if (!RuleB(Chat, EnableVoiceMacros)) return;
VoiceMacroIn_Struct* vmi = (VoiceMacroIn_Struct*)app->pBuffer;
VoiceMacroReceived(vmi->Type, vmi->Target, vmi->MacroNumber);
}
void Client::Handle_OP_UpdateAura(const EQApplicationPacket *app)
{
if (app->size != sizeof(AuraDestory_Struct)) {
LogDebug("Size mismatch in OP_UpdateAura expected [{}] got [{}]", sizeof(AuraDestory_Struct), app->size);
return;
}
// client only sends this for removing
auto aura = (AuraDestory_Struct *)app->pBuffer;
if (aura->action != 1)
return; // could log I guess, but should only ever get this action
RemoveAura(aura->entity_id);
QueuePacket(app); // if we don't resend this, the client gets confused
return;
}
void Client::Handle_OP_WearChange(const EQApplicationPacket *app)
{
if (app->size != sizeof(WearChange_Struct)) {
std::cout << "Wrong size: OP_WearChange, size=" << app->size << ", expected " << sizeof(WearChange_Struct) << std::endl;
return;
}
WearChange_Struct* wc = (WearChange_Struct*)app->pBuffer;
if (wc->spawn_id != GetID())
return;
// Hero Forge ID needs to be fixed here as RoF2 appears to send an incorrect value.
if (wc->hero_forge_model != 0 && wc->wear_slot_id >= 0 && wc->wear_slot_id < EQEmu::textures::weaponPrimary)
wc->hero_forge_model = GetHerosForgeModel(wc->wear_slot_id);
// we could maybe ignore this and just send our own from moveitem
entity_list.QueueClients(this, app, true);
}
void Client::Handle_OP_WhoAllRequest(const EQApplicationPacket *app)
{
if (app->size != sizeof(Who_All_Struct)) {
std::cout << "Wrong size on OP_WhoAll. Got: " << app->size << ", Expected: " << sizeof(Who_All_Struct) << std::endl;
return;
}
Who_All_Struct* whoall = (Who_All_Struct*)app->pBuffer;
if (whoall->type == 0) // SoF only, for regular /who
entity_list.ZoneWho(this, whoall);
else
WhoAll(whoall);
return;
}
void Client::Handle_OP_XTargetAutoAddHaters(const EQApplicationPacket *app)
{
if (app->size != 1)
{
LogDebug("Size mismatch in OP_XTargetAutoAddHaters, expected 1, got [{}]", app->size);
DumpPacket(app);
return;
}
XTargetAutoAddHaters = app->ReadUInt8(0);
SetDirtyAutoHaters();
}
void Client::Handle_OP_XTargetOpen(const EQApplicationPacket *app)
{
if (app->size != 4) {
LogDebug("Size mismatch in OP_XTargetOpen, expected 1, got [{}]", app->size);
DumpPacket(app);
return;
}
auto outapp = new EQApplicationPacket(OP_XTargetOpenResponse, 0);
FastQueuePacket(&outapp);
}
void Client::Handle_OP_XTargetRequest(const EQApplicationPacket *app)
{
if (app->size < 12)
{
LogDebug("Size mismatch in OP_XTargetRequest, expected at least 12, got [{}]", app->size);
DumpPacket(app);
return;
}
uint32 Unknown000 = app->ReadUInt32(0);
if (Unknown000 != 1)
return;
uint32 Slot = app->ReadUInt32(4);
if (Slot >= XTARGET_HARDCAP)
return;
XTargetType Type = (XTargetType)app->ReadUInt32(8);
XTargets[Slot].Type = Type;
XTargets[Slot].ID = 0;
XTargets[Slot].Name[0] = 0;
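	// Slot has been reset; repopulate it below based on the requested XTarget type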
switch (Type)
{
case Empty:
case Auto:
{
break;
}
case CurrentTargetPC:
{
char Name[65];
app->ReadString(Name, 12, 64);
Client *c = entity_list.GetClientByName(Name);
if (c)
{
XTargets[Slot].ID = c->GetID();
strncpy(XTargets[Slot].Name, c->GetName(), 64);
}
else
{
strncpy(XTargets[Slot].Name, Name, 64);
}
SendXTargetPacket(Slot, c);
break;
}
case CurrentTargetNPC:
{
char Name[65];
app->ReadString(Name, 12, 64);
Mob *m = entity_list.GetMob(Name);
if (m)
{
XTargets[Slot].ID = m->GetID();
SendXTargetPacket(Slot, m);
break;
}
}
case TargetsTarget:
{
if (GetTarget())
UpdateXTargetType(TargetsTarget, GetTarget()->GetTarget());
else
UpdateXTargetType(TargetsTarget, nullptr);
break;
}
case GroupTank:
{
Group *g = GetGroup();
if (g)
{
Client *c = entity_list.GetClientByName(g->GetMainTankName());
if (c)
{
XTargets[Slot].ID = c->GetID();
strncpy(XTargets[Slot].Name, c->GetName(), 64);
}
else
{
strncpy(XTargets[Slot].Name, g->GetMainTankName(), 64);
}
SendXTargetPacket(Slot, c);
}
break;
}
case GroupTankTarget:
{
Group *g = GetGroup();
if (g)
g->NotifyTankTarget(this);
break;
}
case GroupAssist:
{
Group *g = GetGroup();
if (g)
{
Client *c = entity_list.GetClientByName(g->GetMainAssistName());
if (c)
{
XTargets[Slot].ID = c->GetID();
strncpy(XTargets[Slot].Name, c->GetName(), 64);
}
else
{
strncpy(XTargets[Slot].Name, g->GetMainAssistName(), 64);
}
SendXTargetPacket(Slot, c);
}
break;
}
case GroupAssistTarget:
{
Group *g = GetGroup();
if (g)
g->NotifyAssistTarget(this);
break;
}
case Puller:
{
Group *g = GetGroup();
if (g)
{
Client *c = entity_list.GetClientByName(g->GetPullerName());
if (c)
{
XTargets[Slot].ID = c->GetID();
strncpy(XTargets[Slot].Name, c->GetName(), 64);
}
else
{
strncpy(XTargets[Slot].Name, g->GetPullerName(), 64);
}
SendXTargetPacket(Slot, c);
}
break;
}
case PullerTarget:
{
Group *g = GetGroup();
if (g)
g->NotifyPullerTarget(this);
break;
}
case GroupMarkTarget1:
case GroupMarkTarget2:
case GroupMarkTarget3:
{
Group *g = GetGroup();
if (g)
g->SendMarkedNPCsToMember(this);
break;
}
case RaidAssist1:
case RaidAssist2:
case RaidAssist3:
case RaidAssist1Target:
case RaidAssist2Target:
case RaidAssist3Target:
case RaidMarkTarget1:
case RaidMarkTarget2:
case RaidMarkTarget3:
{
// Not implemented yet.
break;
}
case MyPet:
{
Mob *m = GetPet();
if (m)
{
XTargets[Slot].ID = m->GetID();
SendXTargetPacket(Slot, m);
}
break;
}
case MyPetTarget:
{
Mob *m = GetPet();
if (m)
m = m->GetTarget();
if (m)
{
XTargets[Slot].ID = m->GetID();
SendXTargetPacket(Slot, m);
}
break;
}
default:
LogDebug("Unhandled XTarget Type [{}]", Type);
break;
}
}
void Client::Handle_OP_YellForHelp(const EQApplicationPacket *app)
{
auto outapp = new EQApplicationPacket(OP_YellForHelp, 4);
*(uint32 *)outapp->pBuffer = GetID();
entity_list.QueueCloseClients(this, outapp, true, 100.0);
safe_delete(outapp);
return;
}
void Client::Handle_OP_ResetAA(const EQApplicationPacket *app)
{
if (Admin() >= 50) {
Message(0, "Resetting AA points.");
ResetAA();
}
return;
}
| 1 | 9,924 | No need to use `this->` here | EQEmu-Server | cpp |
@@ -151,6 +151,7 @@ func (r *ReconcileClusterDeployment) reconcileExistingInstallingClusterInstall(c
statusModified = true
}
+ completed = controllerutils.FindClusterDeploymentCondition(conditions, hivev1.ClusterInstallCompletedClusterDeploymentCondition)
if completed.Status == corev1.ConditionTrue { // the cluster install is complete
cd.Spec.Installed = true
cd.Status.InstalledTimestamp = &completed.LastTransitionTime | 1 | package clusterdeployment
import (
"context"
"reflect"
"time"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
hivev1 "github.com/openshift/hive/apis/hive/v1"
hivecontractsv1alpha1 "github.com/openshift/hive/apis/hivecontracts/v1alpha1"
"github.com/openshift/hive/pkg/constants"
hivemetrics "github.com/openshift/hive/pkg/controller/metrics"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
)
func (r *ReconcileClusterDeployment) reconcileExistingInstallingClusterInstall(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (reconcile.Result, error) {
ref := cd.Spec.ClusterInstallRef
gvk := schema.GroupVersionKind{
Group: ref.Group,
Version: ref.Version,
Kind: ref.Kind,
}
logger = logger.WithField("clusterinstall", ref.Name).WithField("gvk", gvk)
logger.Debug("reconciling existing clusterinstall")
ci := &hivecontractsv1alpha1.ClusterInstall{}
err := controllerutils.GetDuckType(context.TODO(), r.Client,
gvk,
types.NamespacedName{Namespace: cd.Namespace, Name: ref.Name},
ci)
if apierrors.IsNotFound(err) {
logger.Debug("cluster is not found, so skipping")
return reconcile.Result{}, nil
}
if err != nil {
logger.WithError(err).Error("failed to get the cluster install")
return reconcile.Result{}, err
}
specModified := false
statusModified := false
// copy the cluster metadata
if met := ci.Spec.ClusterMetadata; met != nil &&
met.InfraID != "" &&
met.ClusterID != "" &&
met.AdminKubeconfigSecretRef.Name != "" &&
met.AdminPasswordSecretRef.Name != "" {
if !reflect.DeepEqual(cd.Spec.ClusterMetadata, ci.Spec.ClusterMetadata) {
cd.Spec.ClusterMetadata = ci.Spec.ClusterMetadata
specModified = true
}
}
if cd.Status.InstallRestarts != ci.Status.InstallRestarts {
cd.Status.InstallRestarts = ci.Status.InstallRestarts
statusModified = true
}
conditions := cd.Status.Conditions
// copy the required conditions
requiredConditions := []string{
hivev1.ClusterInstallFailed,
hivev1.ClusterInstallCompleted,
hivev1.ClusterInstallStopped,
hivev1.ClusterInstallRequirementsMet,
}
for _, req := range requiredConditions {
cond := controllerutils.FindClusterInstallCondition(ci.Status.Conditions, req)
if cond == nil {
continue
}
updated := false
conditions, updated = controllerutils.SetClusterDeploymentConditionWithChangeCheck(conditions,
hivev1.ClusterDeploymentConditionType("ClusterInstall"+cond.Type), // this transformation is part of the contract
cond.Status,
cond.Reason,
cond.Message,
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
if updated {
statusModified = true
}
}
	// additionally mirror the cluster install failed condition into the provision failed condition
failed := controllerutils.FindClusterDeploymentCondition(conditions, hivev1.ClusterInstallFailedClusterDeploymentCondition)
updated := false
conditions, updated = controllerutils.SetClusterDeploymentConditionWithChangeCheck(conditions,
hivev1.ProvisionFailedCondition, // this transformation is part of the contract
failed.Status,
failed.Reason,
failed.Message,
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
if updated {
statusModified = true
}
// take actions based on the conditions
// like,
// update install started time when requirements met.
// update installed = true when completed
// update the installed timestamp when complete
requirementsMet := controllerutils.FindClusterDeploymentCondition(conditions, hivev1.ClusterInstallRequirementsMetClusterDeploymentCondition)
if requirementsMet.Status == corev1.ConditionTrue {
if !reflect.DeepEqual(cd.Status.InstallStartedTimestamp, &requirementsMet.LastTransitionTime) {
cd.Status.InstallStartedTimestamp = &requirementsMet.LastTransitionTime
statusModified = true
kickstartDuration := time.Since(ci.CreationTimestamp.Time)
logger.WithField("elapsed", kickstartDuration.Seconds()).Info("calculated time to first provision seconds")
metricInstallDelaySeconds.Observe(float64(kickstartDuration.Seconds()))
}
}
completed := controllerutils.FindClusterDeploymentCondition(conditions, hivev1.ClusterInstallCompletedClusterDeploymentCondition)
stopped := controllerutils.FindClusterDeploymentCondition(conditions, hivev1.ClusterInstallStoppedClusterDeploymentCondition)
reason := stopped.Reason
msg := stopped.Message
if stopped.Status == corev1.ConditionTrue && completed.Status == corev1.ConditionFalse {
		// we must have reached the retry limit and therefore
		// gave up before the install completed
reason = installAttemptsLimitReachedReason
msg = "Install attempts limit reached"
}
updated = false
conditions, updated = controllerutils.SetClusterDeploymentConditionWithChangeCheck(conditions,
hivev1.ProvisionStoppedCondition,
stopped.Status,
reason,
msg,
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
if updated {
statusModified = true
}
if completed.Status == corev1.ConditionTrue { // the cluster install is complete
cd.Spec.Installed = true
cd.Status.InstalledTimestamp = &completed.LastTransitionTime
specModified = true
statusModified = true
installStartTime := ci.CreationTimestamp
if cd.Status.InstallStartedTimestamp != nil {
installStartTime = *cd.Status.InstallStartedTimestamp // we expect that the install started when requirements met
}
installDuration := cd.Status.InstalledTimestamp.Sub(installStartTime.Time)
logger.WithField("duration", installDuration.Seconds()).Debug("install job completed")
metricInstallJobDuration.Observe(float64(installDuration.Seconds()))
metricCompletedInstallJobRestarts.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).
Observe(float64(cd.Status.InstallRestarts))
metricClustersInstalled.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
if r.protectedDelete {
// Set protected delete on for the ClusterDeployment.
// If the ClusterDeployment already has the ProtectedDelete annotation, do not overwrite it. This allows the
// user an opportunity to explicitly exclude a ClusterDeployment from delete protection at the time of
// creation of the ClusterDeployment.
if _, annotationPresent := cd.Annotations[constants.ProtectedDeleteAnnotation]; !annotationPresent {
initializeAnnotations(cd)
cd.Annotations[constants.ProtectedDeleteAnnotation] = "true"
specModified = true
}
}
}
if specModified {
if err := r.Update(context.TODO(), cd); err != nil {
logger.WithError(err).Error("failed to update the spec of clusterdeployment")
return reconcile.Result{}, err
}
}
if statusModified {
cd.Status.Conditions = conditions
if err := r.Status().Update(context.TODO(), cd); err != nil {
logger.WithError(err).Error("failed to update the status of clusterdeployment")
return reconcile.Result{}, err
}
}
return reconcile.Result{}, nil
}
func getClusterImageSetFromClusterInstall(client client.Client, cd *hivev1.ClusterDeployment) (string, error) {
ref := cd.Spec.ClusterInstallRef
gvk := schema.GroupVersionKind{
Group: ref.Group,
Version: ref.Version,
Kind: ref.Kind,
}
ci := &hivecontractsv1alpha1.ClusterInstall{}
err := controllerutils.GetDuckType(context.TODO(), client,
gvk,
types.NamespacedName{Namespace: cd.Namespace, Name: ref.Name},
ci)
if err != nil {
return "", err
}
return ci.Spec.ImageSetRef.Name, nil
}
const clusterInstallIndexFieldName = "spec.clusterinstalls"
func indexClusterInstall(o client.Object) []string {
var res []string
cd := o.(*hivev1.ClusterDeployment)
if cd.Spec.ClusterInstallRef != nil {
res = append(res, cd.Spec.ClusterInstallRef.Name)
}
return res
}
func (r *ReconcileClusterDeployment) watchClusterInstall(gvk schema.GroupVersionKind, logger log.FieldLogger) error {
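	// only one watch is needed per cluster install GVK; skip if it is already registered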
_, ok := r.watchingClusterInstall[gvk.String()]
if ok {
return nil
}
logger.WithField("gvk", gvk).Debug("adding cluster install watches")
obj := &unstructured.Unstructured{}
obj.SetGroupVersionKind(gvk)
err := r.watcher.Watch(&source.Kind{Type: obj}, handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request {
retval := []reconcile.Request{}
cdList := &hivev1.ClusterDeploymentList{}
if err := r.Client.List(context.TODO(), cdList,
client.MatchingFields{clusterInstallIndexFieldName: o.GetName()},
client.InNamespace(o.GetNamespace())); err != nil {
logger.WithError(err).Error("failed to list cluster deployment matching cluster install index")
return retval
}
for _, cd := range cdList.Items {
retval = append(retval, reconcile.Request{NamespacedName: types.NamespacedName{
Namespace: cd.Namespace,
Name: cd.Name,
}})
}
logger.WithField("retval", retval).Debug("trigger reconcile for clusterdeployments for cluster install objects")
return retval
}))
if err != nil {
return err
}
logger.WithField("gvk", gvk).Debug("added new watcher for cluster install")
r.watchingClusterInstall[gvk.String()] = struct{}{}
return nil
}
| 1 | 17,943 | I can't quite tell, is there a reason why we're finding this condition again when it was done a few lines above? Or was this a mistaken paste? | openshift-hive | go |
@@ -22,7 +22,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination namespaceCache_mock.go
+//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination cache_mock.go
package namespace
| 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination namespaceCache_mock.go
package namespace
import (
"fmt"
"hash/fnv"
"sort"
"strconv"
"sync"
"sync/atomic"
"time"
"go.temporal.io/server/common/cache"
"github.com/gogo/protobuf/proto"
namespacepb "go.temporal.io/api/namespace/v1"
"go.temporal.io/api/serviceerror"
persistencespb "go.temporal.io/server/api/persistence/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/clock"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
)
// ReplicationPolicy is the namespace's replication policy,
// derived from namespace's replication config
type ReplicationPolicy int
const (
// ReplicationPolicyOneCluster indicate that workflows does not need to be replicated
// applicable to local namespace & global namespace with one cluster
ReplicationPolicyOneCluster ReplicationPolicy = 0
// ReplicationPolicyMultiCluster indicate that workflows need to be replicated
ReplicationPolicyMultiCluster ReplicationPolicy = 1
)
const (
namespaceCacheInitialSize = 10 * 1024
namespaceCacheMaxSize = 64 * 1024
namespaceCacheTTL = 0 // 0 means infinity
	// CacheMinRefreshInterval is the minimum namespace cache refresh interval.
CacheMinRefreshInterval = 2 * time.Second
// CacheRefreshInterval namespace cache refresh interval
CacheRefreshInterval = 10 * time.Second
// CacheRefreshFailureRetryInterval is the wait time
// if refreshment encounters error
CacheRefreshFailureRetryInterval = 1 * time.Second
namespaceCacheRefreshPageSize = 200
namespaceCacheInitialized int32 = 0
namespaceCacheStarted int32 = 1
namespaceCacheStopped int32 = 2
)
type (
	// PrepareCallbackFn is a function to be called before CallbackFn is called;
	// it is guaranteed that the PrepareCallbackFn and CallbackFn pair will either both be called or neither will be called
PrepareCallbackFn func()
	// CallbackFn is a function to be called when the namespace cache entries are changed;
	// it is guaranteed that the PrepareCallbackFn and CallbackFn pair will either both be called or neither will be called
CallbackFn func(prevNamespaces []*CacheEntry, nextNamespaces []*CacheEntry)
	// Cache is used to cache namespace information and configuration to avoid making too many calls to cassandra.
	// This cache is mainly used by frontend for resolving namespace names to namespace uuids which are used throughout the
	// system. Each namespace entry is kept in the cache for one hour but also has an expiry of 10 seconds. This results
	// in updating the namespace entry every 10 seconds but in the case of a cassandra failure we can still keep on serving
	// requests using the stale entry from the cache for up to an hour
Cache interface {
common.Daemon
RegisterNamespaceChangeCallback(shard int32, initialNotificationVersion int64, prepareCallback PrepareCallbackFn, callback CallbackFn)
UnregisterNamespaceChangeCallback(shard int32)
GetNamespace(name string) (*CacheEntry, error)
GetNamespaceByID(id string) (*CacheEntry, error)
GetNamespaceID(name string) (string, error)
GetNamespaceName(id string) (string, error)
GetAllNamespace() map[string]*CacheEntry
GetCacheSize() (sizeOfCacheByName int64, sizeOfCacheByID int64)
}
namespaceCache struct {
status int32
shutdownChan chan struct{}
cacheNameToID *atomic.Value
cacheByID *atomic.Value
metadataMgr persistence.MetadataManager
clusterMetadata cluster.Metadata
timeSource clock.TimeSource
metricsClient metrics.Client
logger log.Logger
		// refreshLock is used to guarantee that at most one
		// goroutine is refreshing the namespace cache at a time
refreshLock sync.Mutex
lastRefreshTime atomic.Value
checkLock sync.Mutex
lastCheckTime time.Time
callbackLock sync.Mutex
prepareCallbacks map[int32]PrepareCallbackFn
callbacks map[int32]CallbackFn
}
// CacheEntries CacheEntry slice
CacheEntries []*CacheEntry
// CacheEntry contains the info and config for a namespace
CacheEntry struct {
clusterMetadata cluster.Metadata
sync.RWMutex
info *persistencespb.NamespaceInfo
config *persistencespb.NamespaceConfig
replicationConfig *persistencespb.NamespaceReplicationConfig
configVersion int64
failoverVersion int64
isGlobalNamespace bool
failoverNotificationVersion int64
notificationVersion int64
initialized bool
}
)
// NewNamespaceCache creates a new instance of cache for holding onto namespace information to reduce the load on persistence
func NewNamespaceCache(
metadataMgr persistence.MetadataManager,
clusterMetadata cluster.Metadata,
metricsClient metrics.Client,
logger log.Logger,
) Cache {
nscache := &namespaceCache{
status: namespaceCacheInitialized,
shutdownChan: make(chan struct{}),
cacheNameToID: &atomic.Value{},
cacheByID: &atomic.Value{},
metadataMgr: metadataMgr,
clusterMetadata: clusterMetadata,
timeSource: clock.NewRealTimeSource(),
metricsClient: metricsClient,
logger: logger,
prepareCallbacks: make(map[int32]PrepareCallbackFn),
callbacks: make(map[int32]CallbackFn),
}
nscache.cacheNameToID.Store(newCache())
nscache.cacheByID.Store(newCache())
nscache.lastRefreshTime.Store(time.Time{})
return nscache
}
func newCache() cache.Cache {
opts := &cache.Options{}
opts.InitialCapacity = namespaceCacheInitialSize
opts.TTL = namespaceCacheTTL
return cache.New(namespaceCacheMaxSize, opts)
}
func newCacheEntry(
clusterMetadata cluster.Metadata,
) *CacheEntry {
return &CacheEntry{
clusterMetadata: clusterMetadata,
initialized: false,
}
}
// NewGlobalCacheEntryForTest returns an entry with test data
func NewGlobalCacheEntryForTest(
info *persistencespb.NamespaceInfo,
config *persistencespb.NamespaceConfig,
repConfig *persistencespb.NamespaceReplicationConfig,
failoverVersion int64,
clusterMetadata cluster.Metadata,
) *CacheEntry {
return &CacheEntry{
info: info,
config: config,
isGlobalNamespace: true,
replicationConfig: repConfig,
failoverVersion: failoverVersion,
clusterMetadata: clusterMetadata,
}
}
// NewLocalCacheEntryForTest returns an entry with test data
func NewLocalCacheEntryForTest(
info *persistencespb.NamespaceInfo,
config *persistencespb.NamespaceConfig,
targetCluster string,
clusterMetadata cluster.Metadata,
) *CacheEntry {
return &CacheEntry{
info: info,
config: config,
isGlobalNamespace: false,
replicationConfig: &persistencespb.NamespaceReplicationConfig{
ActiveClusterName: targetCluster,
Clusters: []string{targetCluster},
},
failoverVersion: common.EmptyVersion,
clusterMetadata: clusterMetadata,
}
}
// NewNamespaceCacheEntryForTest returns an entry with test data
func NewNamespaceCacheEntryForTest(
info *persistencespb.NamespaceInfo,
config *persistencespb.NamespaceConfig,
isGlobalNamespace bool,
repConfig *persistencespb.NamespaceReplicationConfig,
failoverVersion int64,
clusterMetadata cluster.Metadata,
) *CacheEntry {
return &CacheEntry{
info: info,
config: config,
isGlobalNamespace: isGlobalNamespace,
replicationConfig: repConfig,
failoverVersion: failoverVersion,
clusterMetadata: clusterMetadata,
}
}
func (c *namespaceCache) GetCacheSize() (sizeOfCacheByName int64, sizeOfCacheByID int64) {
return int64(c.cacheByID.Load().(cache.Cache).Size()), int64(c.cacheNameToID.Load().(cache.Cache).Size())
}
// Start the background refresh of namespace
func (c *namespaceCache) Start() {
if !atomic.CompareAndSwapInt32(&c.status, namespaceCacheInitialized, namespaceCacheStarted) {
return
}
// initialize the cache by initial scan
err := c.refreshNamespaces()
if err != nil {
c.logger.Fatal("Unable to initialize namespace cache", tag.Error(err))
}
go c.refreshLoop()
}
// Stop the background refresh of namespace
func (c *namespaceCache) Stop() {
if !atomic.CompareAndSwapInt32(&c.status, namespaceCacheStarted, namespaceCacheStopped) {
return
}
close(c.shutdownChan)
}
func (c *namespaceCache) GetAllNamespace() map[string]*CacheEntry {
result := make(map[string]*CacheEntry)
ite := c.cacheByID.Load().(cache.Cache).Iterator()
defer ite.Close()
for ite.HasNext() {
entry := ite.Next()
id := entry.Key().(string)
namespaceCacheEntry := entry.Value().(*CacheEntry)
namespaceCacheEntry.RLock()
dup := namespaceCacheEntry.duplicate()
namespaceCacheEntry.RUnlock()
result[id] = dup
}
return result
}
// RegisterNamespaceChangeCallback sets a namespace change callback
// WARN: the prepareCallback function will be triggered by the namespace cache while holding the namespace cache lock,
// so make sure the callback does not call back into the namespace cache, otherwise it can deadlock
// callback will be invoked when NOT holding the namespace cache lock.
func (c *namespaceCache) RegisterNamespaceChangeCallback(
shard int32,
initialNotificationVersion int64,
prepareCallback PrepareCallbackFn,
callback CallbackFn,
) {
c.callbackLock.Lock()
c.prepareCallbacks[shard] = prepareCallback
c.callbacks[shard] = callback
c.callbackLock.Unlock()
// this section is trying to make the shard catch up with namespace changes
namespaces := CacheEntries{}
for _, namespace := range c.GetAllNamespace() {
namespaces = append(namespaces, namespace)
}
	// we must notify of the changes in an ordered fashion
	// since history shards have to update the shard info
	// with the namespace change version.
sort.Sort(namespaces)
var prevEntries []*CacheEntry
var nextEntries []*CacheEntry
for _, namespace := range namespaces {
if namespace.notificationVersion >= initialNotificationVersion {
prevEntries = append(prevEntries, nil)
nextEntries = append(nextEntries, namespace)
}
}
if len(prevEntries) > 0 {
prepareCallback()
callback(prevEntries, nextEntries)
}
}
// UnregisterNamespaceChangeCallback delete a namespace failover callback
func (c *namespaceCache) UnregisterNamespaceChangeCallback(
shard int32,
) {
c.callbackLock.Lock()
defer c.callbackLock.Unlock()
delete(c.prepareCallbacks, shard)
delete(c.callbacks, shard)
}
// GetNamespace retrieves the information from the cache if it exists, otherwise retrieves the information from metadata
// store and writes it to the cache with an expiry before returning back
func (c *namespaceCache) GetNamespace(
name string,
) (*CacheEntry, error) {
if name == "" {
return nil, serviceerror.NewInvalidArgument("Namespace is empty.")
}
return c.getNamespace(name)
}
// GetNamespaceByID retrieves the information from the cache if it exists, otherwise retrieves the information from metadata
// store and writes it to the cache with an expiry before returning back
func (c *namespaceCache) GetNamespaceByID(
id string,
) (*CacheEntry, error) {
if id == "" {
return nil, serviceerror.NewInvalidArgument("NamespaceID is empty.")
}
return c.getNamespaceByID(id)
}
// GetNamespaceID retrieves namespaceID by using GetNamespace
func (c *namespaceCache) GetNamespaceID(
name string,
) (string, error) {
entry, err := c.GetNamespace(name)
if err != nil {
return "", err
}
return entry.info.Id, nil
}
// GetNamespaceName returns namespace name given the namespace id
func (c *namespaceCache) GetNamespaceName(
id string,
) (string, error) {
entry, err := c.getNamespaceByID(id)
if err != nil {
return "", err
}
return entry.info.Name, nil
}
func (c *namespaceCache) refreshLoop() {
timer := time.NewTicker(CacheRefreshInterval)
defer timer.Stop()
for {
select {
case <-c.shutdownChan:
return
case <-timer.C:
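			// keep retrying the refresh until it succeeds or shutdown is requested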
for err := c.refreshNamespaces(); err != nil; err = c.refreshNamespaces() {
select {
case <-c.shutdownChan:
return
default:
c.logger.Error("Error refreshing namespace cache", tag.Error(err))
time.Sleep(CacheRefreshFailureRetryInterval)
}
}
}
}
}
func (c *namespaceCache) refreshNamespaces() error {
c.refreshLock.Lock()
defer c.refreshLock.Unlock()
return c.refreshNamespacesLocked()
}
// this function only refreshes the namespaces in the v2 table;
// the namespaces in the v1 table will be refreshed if the cache is stale
func (c *namespaceCache) refreshNamespacesLocked() error {
now := c.timeSource.Now()
	// first load the metadata record, then load namespaces;
	// this guarantees that namespaces in the cache are never newer than the metadata record
metadata, err := c.metadataMgr.GetMetadata()
if err != nil {
return err
}
namespaceNotificationVersion := metadata.NotificationVersion
var token []byte
request := &persistence.ListNamespacesRequest{PageSize: namespaceCacheRefreshPageSize}
var namespaces CacheEntries
continuePage := true
for continuePage {
request.NextPageToken = token
response, err := c.metadataMgr.ListNamespaces(request)
if err != nil {
return err
}
token = response.NextPageToken
for _, namespace := range response.Namespaces {
namespaces = append(namespaces, c.buildEntryFromRecord(namespace))
}
continuePage = len(token) != 0
}
	// we must apply the namespace changes in order
	// since history shards have to update the shard info
	// with the namespace change version.
sort.Sort(namespaces)
var prevEntries []*CacheEntry
var nextEntries []*CacheEntry
	// make a copy of the existing namespace cache, so we can calculate the diff and do a compare-and-swap
newCacheNameToID := newCache()
newCacheByID := newCache()
for _, namespace := range c.GetAllNamespace() {
newCacheNameToID.Put(namespace.info.Name, namespace.info.Id)
newCacheByID.Put(namespace.info.Id, namespace)
}
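	// apply the freshly listed namespaces in notification-version order, collecting prev/next pairs for the change callbacks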
UpdateLoop:
for _, namespace := range namespaces {
if namespace.notificationVersion >= namespaceNotificationVersion {
// this guarantee that namespace change events before the
// namespaceNotificationVersion is loaded into the cache.
// the namespace change events after the namespaceNotificationVersion
// will be loaded into cache in the next refresh
break UpdateLoop
}
prevEntry, nextEntry, err := c.updateIDToNamespaceCache(newCacheByID, namespace.info.Id, namespace)
if err != nil {
return err
}
c.updateNameToIDCache(newCacheNameToID, nextEntry.info.Name, nextEntry.info.Id)
if prevEntry != nil {
prevEntries = append(prevEntries, prevEntry)
nextEntries = append(nextEntries, nextEntry)
}
}
// NOTE: READ REF BEFORE MODIFICATION
// ref: historyEngine.go registerNamespaceFailoverCallback function
c.callbackLock.Lock()
defer c.callbackLock.Unlock()
c.triggerNamespaceChangePrepareCallbackLocked()
c.cacheByID.Store(newCacheByID)
c.cacheNameToID.Store(newCacheNameToID)
c.triggerNamespaceChangeCallbackLocked(prevEntries, nextEntries)
// only update last refresh time when refresh succeeded
c.lastRefreshTime.Store(now)
return nil
}
func (c *namespaceCache) checkAndContinue(
name string,
id string,
) (bool, error) {
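	// throttle lookups: only hit persistence if we have not refreshed or checked within CacheMinRefreshInterval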
now := c.timeSource.Now()
if now.Sub(c.lastRefreshTime.Load().(time.Time)) < CacheMinRefreshInterval {
return false, nil
}
c.checkLock.Lock()
defer c.checkLock.Unlock()
now = c.timeSource.Now()
if now.Sub(c.lastCheckTime) < CacheMinRefreshInterval {
return true, nil
}
c.lastCheckTime = now
_, err := c.metadataMgr.GetNamespace(&persistence.GetNamespaceRequest{Name: name, ID: id})
if err != nil {
return false, err
}
return true, nil
}
func (c *namespaceCache) updateNameToIDCache(
cacheNameToID cache.Cache,
name string,
id string,
) {
cacheNameToID.Put(name, id)
}
func (c *namespaceCache) updateIDToNamespaceCache(
cacheByID cache.Cache,
id string,
record *CacheEntry,
) (*CacheEntry, *CacheEntry, error) {
elem, err := cacheByID.PutIfNotExist(id, newCacheEntry(c.clusterMetadata))
if err != nil {
return nil, nil, err
}
entry := elem.(*CacheEntry)
entry.Lock()
defer entry.Unlock()
var prevNamespace *CacheEntry
triggerCallback := c.clusterMetadata.IsGlobalNamespaceEnabled() &&
// initialized will be true when the entry contains valid data
entry.initialized &&
record.notificationVersion > entry.notificationVersion
if triggerCallback {
prevNamespace = entry.duplicate()
}
entry.info = record.info
entry.config = record.config
entry.replicationConfig = record.replicationConfig
entry.configVersion = record.configVersion
entry.failoverVersion = record.failoverVersion
entry.isGlobalNamespace = record.isGlobalNamespace
entry.failoverNotificationVersion = record.failoverNotificationVersion
entry.notificationVersion = record.notificationVersion
entry.initialized = record.initialized
nextNamespace := entry.duplicate()
return prevNamespace, nextNamespace, nil
}
// getNamespace retrieves the information from the cache if it exists, otherwise retrieves the information from metadata
// store and writes it to the cache with an expiry before returning back
func (c *namespaceCache) getNamespace(
name string,
) (*CacheEntry, error) {
id, cacheHit := c.cacheNameToID.Load().(cache.Cache).Get(name).(string)
if cacheHit {
return c.getNamespaceByID(id)
}
return nil, serviceerror.NewNotFound(fmt.Sprintf("namespace: %v not found", name))
}
// getNamespaceByID retrieves the information from the cache if it exists, otherwise retrieves the information from metadata
// store and writes it to the cache with an expiry before returning back
func (c *namespaceCache) getNamespaceByID(
id string,
) (*CacheEntry, error) {
var result *CacheEntry
entry, cacheHit := c.cacheByID.Load().(cache.Cache).Get(id).(*CacheEntry)
if cacheHit {
entry.RLock()
result = entry.duplicate()
entry.RUnlock()
return result, nil
}
return nil, serviceerror.NewNotFound(fmt.Sprintf("namespace ID: %v not found", id))
}
func (c *namespaceCache) triggerNamespaceChangePrepareCallbackLocked() {
sw := c.metricsClient.StartTimer(metrics.NamespaceCacheScope, metrics.NamespaceCachePrepareCallbacksLatency)
defer sw.Stop()
for _, prepareCallback := range c.prepareCallbacks {
prepareCallback()
}
}
func (c *namespaceCache) triggerNamespaceChangeCallbackLocked(
prevNamespaces []*CacheEntry,
nextNamespaces []*CacheEntry,
) {
sw := c.metricsClient.StartTimer(metrics.NamespaceCacheScope, metrics.NamespaceCacheCallbacksLatency)
defer sw.Stop()
for _, callback := range c.callbacks {
callback(prevNamespaces, nextNamespaces)
}
}
func (c *namespaceCache) buildEntryFromRecord(
record *persistence.GetNamespaceResponse,
) *CacheEntry {
// this is a shallow copy, but since the record is generated by persistence
// and only accessible here, it would be fine
newEntry := newCacheEntry(c.clusterMetadata)
newEntry.info = record.Namespace.Info
newEntry.config = record.Namespace.Config
newEntry.replicationConfig = record.Namespace.ReplicationConfig
newEntry.configVersion = record.Namespace.ConfigVersion
newEntry.failoverVersion = record.Namespace.FailoverVersion
newEntry.isGlobalNamespace = record.IsGlobalNamespace
newEntry.failoverNotificationVersion = record.Namespace.FailoverNotificationVersion
newEntry.notificationVersion = record.NotificationVersion
newEntry.initialized = true
return newEntry
}
func (entry *CacheEntry) duplicate() *CacheEntry {
// this is a deep copy
result := newCacheEntry(entry.clusterMetadata)
result.info = proto.Clone(entry.info).(*persistencespb.NamespaceInfo)
if result.info.Data == nil {
result.info.Data = make(map[string]string, 0)
}
result.config = proto.Clone(entry.config).(*persistencespb.NamespaceConfig)
if result.config.BadBinaries == nil || result.config.BadBinaries.Binaries == nil {
result.config.BadBinaries.Binaries = make(map[string]*namespacepb.BadBinaryInfo, 0)
}
result.replicationConfig = proto.Clone(entry.replicationConfig).(*persistencespb.NamespaceReplicationConfig)
result.configVersion = entry.configVersion
result.failoverVersion = entry.failoverVersion
result.isGlobalNamespace = entry.isGlobalNamespace
result.failoverNotificationVersion = entry.failoverNotificationVersion
result.notificationVersion = entry.notificationVersion
result.initialized = entry.initialized
return result
}
// GetInfo return the namespace info
func (entry *CacheEntry) GetInfo() *persistencespb.NamespaceInfo {
return entry.info
}
// GetConfig return the namespace config
func (entry *CacheEntry) GetConfig() *persistencespb.NamespaceConfig {
return entry.config
}
// GetReplicationConfig return the namespace replication config
func (entry *CacheEntry) GetReplicationConfig() *persistencespb.NamespaceReplicationConfig {
return entry.replicationConfig
}
// GetConfigVersion return the namespace config version
func (entry *CacheEntry) GetConfigVersion() int64 {
return entry.configVersion
}
// GetFailoverVersion return the namespace failover version
func (entry *CacheEntry) GetFailoverVersion() int64 {
return entry.failoverVersion
}
// IsGlobalNamespace return whether the namespace is a global namespace
func (entry *CacheEntry) IsGlobalNamespace() bool {
return entry.isGlobalNamespace
}
// GetFailoverNotificationVersion return the global notification version of when failover happened
func (entry *CacheEntry) GetFailoverNotificationVersion() int64 {
return entry.failoverNotificationVersion
}
// GetNotificationVersion return the global notification version of when namespace changed
func (entry *CacheEntry) GetNotificationVersion() int64 {
return entry.notificationVersion
}
// IsNamespaceActive returns whether the namespace is active, i.e. a non-global namespace, or a global namespace whose active cluster is the current cluster
func (entry *CacheEntry) IsNamespaceActive() bool {
if !entry.isGlobalNamespace {
// namespace is not a global namespace, meaning namespace is always "active" within each cluster
return true
}
return entry.clusterMetadata.GetCurrentClusterName() == entry.replicationConfig.ActiveClusterName
}
// GetReplicationPolicy return the derived workflow replication policy
func (entry *CacheEntry) GetReplicationPolicy() ReplicationPolicy {
	// the frontend guarantees that the clusters list always contains the active cluster, so if the # of clusters is 1
	// then we do not need to send out any events for replication
if entry.isGlobalNamespace && len(entry.replicationConfig.Clusters) > 1 {
return ReplicationPolicyMultiCluster
}
return ReplicationPolicyOneCluster
}
// GetNamespaceNotActiveErr return err if namespace is not active, nil otherwise
func (entry *CacheEntry) GetNamespaceNotActiveErr() error {
if entry.IsNamespaceActive() {
		// namespace is considered active
return nil
}
return serviceerror.NewNamespaceNotActive(
entry.info.Name,
entry.clusterMetadata.GetCurrentClusterName(),
entry.replicationConfig.ActiveClusterName,
)
}
// Len return length
func (t CacheEntries) Len() int {
return len(t)
}
// Swap implements sort.Interface.
func (t CacheEntries) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
// Less implements sort.Interface
func (t CacheEntries) Less(i, j int) bool {
return t[i].notificationVersion < t[j].notificationVersion
}
// CreateNamespaceCacheEntry create a cache entry with namespace
func CreateNamespaceCacheEntry(
namespace string,
) *CacheEntry {
return &CacheEntry{info: &persistencespb.NamespaceInfo{Name: namespace}}
}
// SampleRetentionKey is key to specify sample retention
var SampleRetentionKey = "sample_retention_days"
// SampleRateKey is key to specify sample rate
var SampleRateKey = "sample_retention_rate"
// GetRetention returns retention in days for given workflow
func (entry *CacheEntry) GetRetention(
workflowID string,
) time.Duration {
if entry.config.Retention == nil {
return 0
}
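	// workflows sampled for longer retention use the override from the namespace data, but never less than the configured retention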
if entry.IsSampledForLongerRetention(workflowID) {
if sampledRetentionValue, ok := entry.info.Data[SampleRetentionKey]; ok {
sampledRetentionDays, err := strconv.Atoi(sampledRetentionValue)
sampledRetention := *timestamp.DurationFromDays(int32(sampledRetentionDays))
if err != nil || sampledRetention < *entry.config.Retention {
return *entry.config.Retention
}
return sampledRetention
}
}
return *entry.config.Retention
}
// IsSampledForLongerRetentionEnabled return whether sample for longer retention is enabled or not
func (entry *CacheEntry) IsSampledForLongerRetentionEnabled(string) bool {
_, ok := entry.info.Data[SampleRateKey]
return ok
}
// IsSampledForLongerRetention return should given workflow been sampled or not
func (entry *CacheEntry) IsSampledForLongerRetention(
workflowID string,
) bool {
if sampledRateValue, ok := entry.info.Data[SampleRateKey]; ok {
sampledRate, err := strconv.ParseFloat(sampledRateValue, 64)
if err != nil {
return false
}
h := fnv.New32a()
_, err = h.Write([]byte(workflowID))
if err != nil {
return false
}
hash := h.Sum32()
r := float64(hash%1000) / float64(1000) // use 1000 so we support one decimal rate like 1.5%.
if r < sampledRate { // sampled
return true
}
}
return false
}
| 1 | 12,542 | doh. My fault, sorry. | temporalio-temporal | go |
@@ -80,3 +80,11 @@ func conditionIsProvider1(provider market.ServiceProposal) bool {
func conditionIsStreaming(provider market.ServiceProposal) bool {
return provider.ServiceType == "streaming"
}
+
+func fieldID(proposal market.ServiceProposal) interface{} {
+ return proposal.ID
+}
+
+func fieldProviderID(proposal market.ServiceProposal) interface{} {
+ return proposal.ProviderID
+} | 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package reducer
import (
"github.com/mysteriumnetwork/node/market"
)
var (
provider1 = "0x1"
provider2 = "0x2"
serviceTypeStreaming = "streaming"
serviceTypeNoop = "noop"
accessRuleWhitelist = market.AccessPolicy{
ID: "whitelist",
Source: "whitelist.txt",
}
accessRuleBlacklist = market.AccessPolicy{
ID: "blacklist",
Source: "blacklist.txt",
}
locationDatacenter = market.Location{ASN: 1000, Country: "DE", City: "Berlin", NodeType: "datacenter"}
locationResidential = market.Location{ASN: 124, Country: "LT", City: "Vilnius", NodeType: "residential"}
proposalEmpty = market.ServiceProposal{}
proposalProvider1Streaming = market.ServiceProposal{
ProviderID: provider1,
ServiceType: serviceTypeStreaming,
ServiceDefinition: mockService{Location: locationDatacenter},
AccessPolicies: &[]market.AccessPolicy{accessRuleWhitelist},
}
proposalProvider1Noop = market.ServiceProposal{
ProviderID: provider1,
ServiceType: serviceTypeNoop,
ServiceDefinition: mockService{},
}
proposalProvider2Streaming = market.ServiceProposal{
ProviderID: provider2,
ServiceType: serviceTypeStreaming,
ServiceDefinition: mockService{Location: locationResidential},
AccessPolicies: &[]market.AccessPolicy{accessRuleWhitelist, accessRuleBlacklist},
}
)
type mockService struct {
Location market.Location
}
func (service mockService) GetLocation() market.Location {
return service.Location
}
func conditionAlwaysMatch(_ market.ServiceProposal) bool {
return true
}
func conditionNeverMatch(_ market.ServiceProposal) bool {
return false
}
func conditionIsProvider1(provider market.ServiceProposal) bool {
return provider.ProviderID == provider1
}
func conditionIsStreaming(provider market.ServiceProposal) bool {
return provider.ServiceType == "streaming"
}
| 1 | 14,593 | Why do we returning `interface{}` here? Don't we have a predefined type for `ID`? | mysteriumnetwork-node | go |
@@ -34,9 +34,12 @@ import MiniChart from 'GoogleComponents/mini-chart';
*/
import { trafficSourcesReportDataDefaults } from '../util';
-const { __, sprintf } = wp.i18n;
-const { map } = lodash;
-const { Component, Fragment } = wp.element;
+/**
+ * WordPress dependencies
+ */
+import { __, sprintf } from '@wordpress/i18n';
+import { map } from 'lodash';
+import { Component, Fragment } from '@wordpress/element';
class AnalyticsDashboardWidgetTopAcquisitionSources extends Component {
render() { | 1 | /**
* AnalyticsDashboardWidgetTopAcquisitionSources component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import withData from 'GoogleComponents/higherorder/withdata';
import { TYPE_MODULES } from 'GoogleComponents/data';
import {
getTimeInSeconds,
numberFormat,
getDateRangeFrom,
} from 'GoogleUtil';
import { getDataTableFromData, TableOverflowContainer } from 'GoogleComponents/data-table';
import PreviewTable from 'GoogleComponents/preview-table';
import MiniChart from 'GoogleComponents/mini-chart';
/**
* Internal dependencies
*/
import { trafficSourcesReportDataDefaults } from '../util';
const { __, sprintf } = wp.i18n;
const { map } = lodash;
const { Component, Fragment } = wp.element;
class AnalyticsDashboardWidgetTopAcquisitionSources extends Component {
render() {
const { data } = this.props;
if ( ! data || ! data.length ) {
return null;
}
const { dateRangeFrom } = getDateRangeFrom();
const headers = [
{
title: __( 'Medium', 'google-site-kit' ),
tooltip: __( 'Medium refers to where your traffic originated from', 'google-site-kit' ),
},
{
title: __( 'Users', 'google-site-kit' ),
tooltip: __( 'Number of users that originated from that traffic', 'google-site-kit' ),
},
{
title: __( 'New Users', 'google-site-kit' ),
tooltip: sprintf( __( 'Number of new users to visit your page over %s', 'google-site-kit' ), dateRangeFrom ),
},
{
title: __( 'Sessions', 'google-site-kit' ),
tooltip: sprintf( __( 'Number of sessions users had on your website over %s', 'google-site-kit' ), dateRangeFrom ),
},
{
title: __( 'Percentage', 'google-site-kit' ),
tooltip: __( 'Percentage of sessions', 'google-site-kit' ),
},
];
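		// Grand total for the first metric, used to compute each medium's percentage share.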
const totalSessions = data[ 0 ].data.totals[ 0 ].values[ 0 ];
const dataMapped = map( data[ 0 ].data.rows, ( row, i ) => {
const percent = ( row.metrics[ 0 ].values[ 0 ] / totalSessions * 100 );
// Exclude sources below 1%.
if ( 1 > percent ) {
return false;
}
return [
row.dimensions[ 0 ],
numberFormat( row.metrics[ 0 ].values[ 0 ] ),
numberFormat( row.metrics[ 0 ].values[ 1 ] ),
numberFormat( row.metrics[ 0 ].values[ 2 ] ),
<Fragment key={ 'minichart-analytics-top-as-' + i }><div className="googlesitekit-table__body-item-chart-wrap">{ `${ percent.toFixed( 2 ) }%` } <MiniChart percent={ percent.toFixed( 1 ) } index={ i } /></div></Fragment>,
];
} );
const options = {
hideHeader: false,
chartsEnabled: false,
};
const dataTable = getDataTableFromData( dataMapped, headers, options );
return (
<div className="googlesitekit-details-widget">
<TableOverflowContainer>
{ dataTable }
</TableOverflowContainer>
</div>
);
}
}
export default withData(
AnalyticsDashboardWidgetTopAcquisitionSources,
[
{
type: TYPE_MODULES,
identifier: 'analytics',
datapoint: 'report',
data: {
...trafficSourcesReportDataDefaults,
url: googlesitekit.permaLink,
},
priority: 1,
maxAge: getTimeInSeconds( 'day' ),
context: [ 'Single', 'Dashboard' ],
},
],
<PreviewTable
rows={ 4 }
rowHeight={ 50 }
/>
);
| 1 | 24,749 | `lodash` shouldn't be grouped under WordPress dependencies | google-site-kit-wp | js |
@@ -8,6 +8,8 @@ import (
"context"
"flag"
"fmt"
+ "github.com/rclone/rclone/fs/config/provider/goconfig"
+ "github.com/rclone/rclone/fs/config/provider/viper"
"io"
"io/ioutil"
"log" | 1 | // Package fstest provides utilities for testing the Fs
package fstest
// FIXME put name of test FS in Fs structure
import (
"bytes"
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm"
)
// Globals
var (
RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
Verbose = flag.Bool("verbose", false, "Set to enable logging")
DumpHeaders = flag.Bool("dump-headers", false, "Set to dump headers (needs -verbose)")
DumpBodies = flag.Bool("dump-bodies", false, "Set to dump bodies (needs -verbose)")
Individual = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower")
LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
UseListR = flag.Bool("fast-list", false, "Use recursive list if available. Uses more memory but fewer transactions.")
// SizeLimit signals tests to skip maximum test file size and skip inappropriate runs
SizeLimit = flag.Int64("size-limit", 0, "Limit maximum test file size")
// ListRetries is the number of times to retry a listing to overcome eventual consistency
ListRetries = flag.Int("list-retries", 6, "Number or times to retry listing")
// MatchTestRemote matches the remote names used for testing
MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{24}$`)
)
// Seed the random number generator
func init() {
rand.Seed(time.Now().UnixNano())
}
// Initialise rclone for testing
func Initialise() {
// Never ask for passwords, fail instead.
// If your local config is encrypted set environment variable
// "RCLONE_CONFIG_PASS=hunter2" (or your password)
fs.Config.AskPassword = false
// Override the config file from the environment - we don't
// parse the flags any more so this doesn't happen
// automatically
if envConfig := os.Getenv("RCLONE_CONFIG"); envConfig != "" {
config.ConfigPath = envConfig
}
config.LoadConfig()
if *Verbose {
fs.Config.LogLevel = fs.LogLevelDebug
}
if *DumpHeaders {
fs.Config.Dump |= fs.DumpHeaders
}
if *DumpBodies {
fs.Config.Dump |= fs.DumpBodies
}
fs.Config.LowLevelRetries = *LowLevelRetries
fs.Config.UseListR = *UseListR
}
// Item represents an item for checking
type Item struct {
Path string
Hashes map[hash.Type]string
ModTime time.Time
Size int64
}
// NewItem creates an item from a string content
func NewItem(Path, Content string, modTime time.Time) Item {
i := Item{
Path: Path,
ModTime: modTime,
Size: int64(len(Content)),
}
hash := hash.NewMultiHasher()
buf := bytes.NewBufferString(Content)
_, err := io.Copy(hash, buf)
if err != nil {
log.Fatalf("Failed to create item: %v", err)
}
i.Hashes = hash.Sums()
return i
}
// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
dt := t0.Sub(t1)
if dt >= precision || dt <= -precision {
return dt, false
}
return dt, true
}
// CheckModTime checks the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
dt, ok := CheckTimeEqualWithPrecision(modTime, i.ModTime, precision)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision))
}
// CheckHashes checks all the hashes the object supports are correct
func (i *Item) CheckHashes(t *testing.T, obj fs.Object) {
require.NotNil(t, obj)
types := obj.Fs().Hashes().Array()
for _, Hash := range types {
// Check attributes
sum, err := obj.Hash(context.Background(), Hash)
require.NoError(t, err)
assert.True(t, hash.Equals(i.Hashes[Hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), Hash, i.Hashes[Hash], sum))
}
}
// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
i.CheckHashes(t, obj)
assert.Equal(t, i.Size, obj.Size(), fmt.Sprintf("%s: size incorrect file=%d vs obj=%d", i.Path, i.Size, obj.Size()))
i.CheckModTime(t, obj, obj.ModTime(context.Background()), precision)
}
// Normalize runs a utf8 normalization on the string if running on OS
// X. This is because OS X denormalizes file names it writes to the
// local file system.
func Normalize(name string) string {
if runtime.GOOS == "darwin" {
name = norm.NFC.String(name)
}
return name
}
// Items represents all items for checking
type Items struct {
byName map[string]*Item
byNameAlt map[string]*Item
items []Item
}
// NewItems makes an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
byNameAlt: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[Normalize(items[i].Path)] = &items[i]
}
return is
}
// Find checks off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
remote := Normalize(obj.Remote())
i, ok := is.byName[remote]
if !ok {
i, ok = is.byNameAlt[remote]
assert.True(t, ok, fmt.Sprintf("Unexpected file %q", remote))
}
if i != nil {
delete(is.byName, i.Path)
i.Check(t, obj, precision)
}
}
// Done checks all finished
func (is *Items) Done(t *testing.T) {
if len(is.byName) != 0 {
for name := range is.byName {
t.Logf("Not found %q", name)
}
}
assert.Equal(t, 0, len(is.byName), fmt.Sprintf("%d objects not found", len(is.byName)))
}
// makeListingFromItems returns a string representation of the items
//
// the entries are formatted as "name (size)", sorted, and joined with ", "
func makeListingFromItems(items []Item) string {
nameLengths := make([]string, len(items))
for i, item := range items {
remote := Normalize(item.Path)
nameLengths[i] = fmt.Sprintf("%s (%d)", remote, item.Size)
}
sort.Strings(nameLengths)
return strings.Join(nameLengths, ", ")
}
// makeListingFromObjects returns a string representation of the objects
func makeListingFromObjects(objs []fs.Object) string {
nameLengths := make([]string, len(objs))
for i, obj := range objs {
nameLengths[i] = fmt.Sprintf("%s (%d)", Normalize(obj.Remote()), obj.Size())
}
sort.Strings(nameLengths)
return strings.Join(nameLengths, ", ")
}
// filterEmptyDirs removes any empty (or containing only directories)
// directories from expectedDirs
func filterEmptyDirs(t *testing.T, items []Item, expectedDirs []string) (newExpectedDirs []string) {
dirs := map[string]struct{}{"": struct{}{}}
for _, item := range items {
base := item.Path
for {
base = path.Dir(base)
if base == "." || base == "/" {
break
}
dirs[base] = struct{}{}
}
}
for _, expectedDir := range expectedDirs {
if _, found := dirs[expectedDir]; found {
newExpectedDirs = append(newExpectedDirs, expectedDir)
} else {
t.Logf("Filtering empty directory %q", expectedDir)
}
}
return newExpectedDirs
}
// CheckListingWithRoot checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non nil then we check those too. Note that no
// directories returned is also OK as some remotes don't return
// directories.
//
// dir is the directory used for the listing.
func CheckListingWithRoot(t *testing.T, f fs.Fs, dir string, items []Item, expectedDirs []string, precision time.Duration) {
if expectedDirs != nil && !f.Features().CanHaveEmptyDirectories {
expectedDirs = filterEmptyDirs(t, items, expectedDirs)
}
is := NewItems(items)
ctx := context.Background()
oldErrors := accounting.Stats(ctx).GetErrors()
var objs []fs.Object
var dirs []fs.Directory
var err error
var retries = *ListRetries
sleep := time.Second / 2
wantListing := makeListingFromItems(items)
gotListing := "<unset>"
listingOK := false
for i := 1; i <= retries; i++ {
objs, dirs, err = walk.GetAll(ctx, f, dir, true, -1)
if err != nil && err != fs.ErrorDirNotFound {
t.Fatalf("Error listing: %v", err)
}
gotListing = makeListingFromObjects(objs)
listingOK = wantListing == gotListing
if listingOK && (expectedDirs == nil || len(dirs) == len(expectedDirs)) {
// Put an extra sleep in if we did any retries just to make sure it really
// is consistent (here is looking at you Amazon Drive!)
if i != 1 {
extraSleep := 5*time.Second + sleep
t.Logf("Sleeping for %v just to make sure", extraSleep)
time.Sleep(extraSleep)
}
break
}
sleep *= 2
t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
time.Sleep(sleep)
if doDirCacheFlush := f.Features().DirCacheFlush; doDirCacheFlush != nil {
t.Logf("Flushing the directory cache")
doDirCacheFlush()
}
}
assert.True(t, listingOK, fmt.Sprintf("listing wrong, want\n %s got\n %s", wantListing, gotListing))
for _, obj := range objs {
require.NotNil(t, obj)
is.Find(t, obj, precision)
}
is.Done(t)
// Don't notice an error when listing an empty directory
if len(items) == 0 && oldErrors == 0 && accounting.Stats(ctx).GetErrors() == 1 {
accounting.Stats(ctx).ResetErrors()
}
// Check the directories
if expectedDirs != nil {
expectedDirsCopy := make([]string, len(expectedDirs))
for i, dir := range expectedDirs {
expectedDirsCopy[i] = Normalize(dir)
}
actualDirs := []string{}
for _, dir := range dirs {
actualDirs = append(actualDirs, Normalize(dir.Remote()))
}
sort.Strings(actualDirs)
sort.Strings(expectedDirsCopy)
assert.Equal(t, expectedDirsCopy, actualDirs, "directories")
}
}
// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non nil then we check those too. Note that no
// directories returned is also OK as some remotes don't return
// directories.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs []string, precision time.Duration) {
CheckListingWithRoot(t, f, "", items, expectedDirs, precision)
}
// CheckListing checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
precision := f.Precision()
CheckListingWithPrecision(t, f, items, nil, precision)
}
// CheckItems checks the fs to see if it has only the items passed in
// using a precision of fs.Config.ModifyWindow
func CheckItems(t *testing.T, f fs.Fs, items ...Item) {
CheckListingWithPrecision(t, f, items, nil, fs.GetModifyWindow(f))
}
// CompareItems compares a set of DirEntries to a slice of items and a list of dirs
// The modtimes are compared with the precision supplied
func CompareItems(t *testing.T, entries fs.DirEntries, items []Item, expectedDirs []string, precision time.Duration, what string) {
is := NewItems(items)
var objs []fs.Object
var dirs []fs.Directory
wantListing := makeListingFromItems(items)
for _, entry := range entries {
switch x := entry.(type) {
case fs.Directory:
dirs = append(dirs, x)
case fs.Object:
objs = append(objs, x)
// do nothing
default:
t.Fatalf("unknown object type %T", entry)
}
}
gotListing := makeListingFromObjects(objs)
listingOK := wantListing == gotListing
assert.True(t, listingOK, fmt.Sprintf("%s not equal, want\n %s got\n %s", what, wantListing, gotListing))
for _, obj := range objs {
require.NotNil(t, obj)
is.Find(t, obj, precision)
}
is.Done(t)
// Check the directories
if expectedDirs != nil {
expectedDirsCopy := make([]string, len(expectedDirs))
for i, dir := range expectedDirs {
expectedDirsCopy[i] = Normalize(dir)
}
actualDirs := []string{}
for _, dir := range dirs {
actualDirs = append(actualDirs, Normalize(dir.Remote()))
}
sort.Strings(actualDirs)
sort.Strings(expectedDirsCopy)
assert.Equal(t, expectedDirsCopy, actualDirs, "directories not equal")
}
}
// Time parses a time string or logs a fatal error
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// LocalRemote creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
path, err = ioutil.TempDir("", "rclone")
if err == nil {
// Now remove the directory
err = os.Remove(path)
}
path = filepath.ToSlash(path)
return
}
// RandomRemoteName makes a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
var err error
var leafName string
// Make a directory if remote name is null
if remoteName == "" {
remoteName, err = LocalRemote()
if err != nil {
return "", "", err
}
} else {
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
leafName = "rclone-test-" + random.String(24)
if !MatchTestRemote.MatchString(leafName) {
log.Fatalf("%q didn't match the test remote name regexp", leafName)
}
remoteName += leafName
}
return remoteName, leafName, nil
}
// RandomRemote makes a random bucket or subdirectory on the remote
// from the -remote parameter
//
// Call the finalise function returned to Purge the fs at the end (and
// the parent if necessary)
//
// Returns the remote, its url, a finaliser and an error
func RandomRemote() (fs.Fs, string, func(), error) {
var err error
var parentRemote fs.Fs
remoteName := *RemoteName
remoteName, _, err = RandomRemoteName(remoteName)
if err != nil {
return nil, "", nil, err
}
remote, err := fs.NewFs(remoteName)
if err != nil {
return nil, "", nil, err
}
finalise := func() {
Purge(remote)
if parentRemote != nil {
Purge(parentRemote)
if err != nil {
log.Printf("Failed to purge %v: %v", parentRemote, err)
}
}
}
return remote, remoteName, finalise, nil
}
// Purge is a simplified re-implementation of operations.Purge for the
// test routine cleanup to avoid circular dependencies.
//
// It logs errors rather than returning them
func Purge(f fs.Fs) {
ctx := context.Background()
var err error
doFallbackPurge := true
if doPurge := f.Features().Purge; doPurge != nil {
doFallbackPurge = false
fs.Debugf(f, "Purge remote")
err = doPurge(ctx)
if err == fs.ErrorCantPurge {
doFallbackPurge = true
}
}
if doFallbackPurge {
dirs := []string{""}
err = walk.ListR(ctx, f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
var err error
entries.ForObject(func(obj fs.Object) {
fs.Debugf(f, "Purge object %q", obj.Remote())
err = obj.Remove(ctx)
if err != nil {
log.Printf("purge failed to remove %q: %v", obj.Remote(), err)
}
})
entries.ForDir(func(dir fs.Directory) {
dirs = append(dirs, dir.Remote())
})
return nil
})
sort.Strings(dirs)
for i := len(dirs) - 1; i >= 0; i-- {
dir := dirs[i]
fs.Debugf(f, "Purge dir %q", dir)
err := f.Rmdir(ctx, dir)
if err != nil {
log.Printf("purge failed to rmdir %q: %v", dir, err)
}
}
}
if err != nil {
log.Printf("purge failed: %v", err)
}
}
| 1 | 9,513 | File is not `goimports`-ed (from `goimports`) | rclone-rclone | go |
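For illustration, running goimports on the import block touched by the diff above would move the two new rclone packages out of the standard-library group and into the existing third-party group. This is only an excerpt of the import block (not a compilable file on its own), with the remaining imports assumed unchanged from the original file:
import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	// ... remaining standard-library imports unchanged ...

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/provider/goconfig"
	"github.com/rclone/rclone/fs/config/provider/viper"
	// ... remaining third-party imports unchanged ...
)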
@@ -96,6 +96,11 @@ class InteropServiceHelper {
}
static async defaultFilename(noteIds, fileExtension) {
+		// Use of == here is because noteIds is potentially undefined
+ if (noteIds == null) {
+ return '';
+ }
+
const note = await Note.load(noteIds[0]);
		// In a rare case the passed note will be null, use the id for filename
if (note === null) { | 1 | const { _ } = require('lib/locale');
const { bridge } = require('electron').remote.require('./bridge');
const InteropService = require('lib/services/InteropService');
const Setting = require('lib/models/Setting');
const Note = require('lib/models/Note.js');
const Folder = require('lib/models/Folder.js');
const { friendlySafeFilename } = require('lib/path-utils');
const md5 = require('md5');
const url = require('url');
const { shim } = require('lib/shim');
class InteropServiceHelper {
static async exportNoteToHtmlFile(noteId, exportOptions) {
const tempFile = `${Setting.value('tempDir')}/${md5(Date.now() + Math.random())}.html`;
exportOptions = Object.assign({}, {
path: tempFile,
format: 'html',
target: 'file',
sourceNoteIds: [noteId],
customCss: '',
}, exportOptions);
const service = new InteropService();
const result = await service.export(exportOptions);
console.info('Export HTML result: ', result);
return tempFile;
}
static async exportNoteTo_(target, noteId, options = {}) {
let win = null;
let htmlFile = null;
const cleanup = () => {
if (win) win.destroy();
if (htmlFile) shim.fsDriver().remove(htmlFile);
};
try {
const exportOptions = {
customCss: options.customCss ? options.customCss : '',
};
htmlFile = await this.exportNoteToHtmlFile(noteId, exportOptions);
const windowOptions = {
show: false,
};
win = bridge().newBrowserWindow(windowOptions);
return new Promise((resolve, reject) => {
win.webContents.on('did-finish-load', async () => {
if (target === 'pdf') {
try {
const data = await win.webContents.printToPDF(options);
resolve(data);
} catch (error) {
reject(error);
} finally {
cleanup();
}
} else {
win.webContents.print(options, (success, reason) => {
// TODO: This is correct but broken in Electron 4. Need to upgrade to 5+
						// It calls the callback right away with "false" even if the document hasn't been printed yet.
cleanup();
if (!success && reason !== 'cancelled') reject(new Error(`Could not print: ${reason}`));
resolve();
});
}
});
win.loadURL(url.format({
pathname: htmlFile,
protocol: 'file:',
slashes: true,
}));
});
} catch (error) {
cleanup();
throw error;
}
}
static async exportNoteToPdf(noteId, options = {}) {
return this.exportNoteTo_('pdf', noteId, options);
}
static async printNote(noteId, options = {}) {
return this.exportNoteTo_('printer', noteId, options);
}
static async defaultFilename(noteIds, fileExtension) {
const note = await Note.load(noteIds[0]);
		// In a rare case the passed note will be null, use the id for filename
if (note === null) {
const filename = friendlySafeFilename(noteIds[0], 100);
return `${filename}.${fileExtension}`;
}
const folder = await Folder.load(note.parent_id);
const filename = friendlySafeFilename(note.title, 100);
// In a less rare case the folder will be null, just ignore it
if (folder === null) {
return `${filename}.${fileExtension}`;
}
const foldername = friendlySafeFilename(folder.title, 100);
// friendlySafeFilename assumes that the file extension is added after
return `${foldername} - ${filename}.${fileExtension}`;
}
static async export(dispatch, module, options = null) {
if (!options) options = {};
let path = null;
if (module.target === 'file') {
path = bridge().showSaveDialog({
filters: [{ name: module.description, extensions: module.fileExtensions }],
defaultPath: await this.defaultFilename(options.sourceNoteIds, module.fileExtensions[0]),
});
} else {
path = bridge().showOpenDialog({
properties: ['openDirectory', 'createDirectory'],
});
}
if (!path || (Array.isArray(path) && !path.length)) return;
if (Array.isArray(path)) path = path[0];
dispatch({
type: 'WINDOW_COMMAND',
name: 'showModalMessage',
message: _('Exporting to "%s" as "%s" format. Please wait...', path, module.format),
});
const exportOptions = {};
exportOptions.path = path;
exportOptions.format = module.format;
exportOptions.modulePath = module.path;
exportOptions.target = module.target;
if (options.sourceFolderIds) exportOptions.sourceFolderIds = options.sourceFolderIds;
if (options.sourceNoteIds) exportOptions.sourceNoteIds = options.sourceNoteIds;
const service = new InteropService();
try {
const result = await service.export(exportOptions);
console.info('Export result: ', result);
} catch (error) {
console.error(error);
bridge().showErrorMessageBox(_('Could not export notes: %s', error.message));
}
dispatch({
type: 'WINDOW_COMMAND',
name: 'hideModalMessage',
});
}
}
module.exports = InteropServiceHelper;
| 1 | 14,126 | Not sure why you want to make the distinction between null and undefined? And also `null == undefined` anyway so I think you could simply have `!noteIds` here. | laurent22-joplin | js |
@@ -21,6 +21,15 @@
package http
const (
+ // ApplicationHeaderPrefix is the prefix added to application headers over
+ // the wire.
+ ApplicationHeaderPrefix = "Rpc-Header-"
+
+ // ContextHeaderPrefix is the prefix added to context headers over the wire.
+ ContextHeaderPrefix = "Context-"
+
+ // TODO(abg): Allow customizing header prefixes
+
	// CallerHeader is the HTTP header used to indicate the service doing the calling
CallerHeader = "Rpc-Caller"
| 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
const (
	// CallerHeader is the HTTP header used to indicate the service doing the calling
CallerHeader = "Rpc-Caller"
// EncodingHeader is the HTTP header used to specify the name of the
// encoding.
EncodingHeader = "Rpc-Encoding"
// TTLMSHeader is the HTTP header used to indicate the ttl in ms
TTLMSHeader = "Context-TTL-MS"
// ProcedureHeader is the HTTP header used to indicate the procedure
ProcedureHeader = "Rpc-Procedure"
// ServiceHeader is the HTTP header used to indicate the service
ServiceHeader = "Rpc-Service"
// ApplicationHeaderPrefix is the prefix added to application headers over
// the wire.
ApplicationHeaderPrefix = "Rpc-Header-"
)
// TODO Make consistent with other languages^
| 1 | 9,871 | Almost makes me wonder if this should default to Rpc-Context so everything defaults under Rpc- | yarpc-yarpc-go | go |
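A minimal sketch of the reviewer's suggestion above, assuming the context prefix simply moves under the Rpc- namespace; the "Rpc-Context-" values are hypothetical and not what the package currently defines:
package http

const (
	// ContextHeaderPrefix would then also live under the Rpc- namespace.
	ContextHeaderPrefix = "Rpc-Context-"

	// TTLMSHeader would follow the same convention instead of the bare Context- prefix.
	TTLMSHeader = "Rpc-Context-TTL-MS"
)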
@@ -26,7 +26,7 @@ import (
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
- accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
+ "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution/evm"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rewarding" | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"context"
"math/big"
"os"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/facebookgo/clock"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution/evm"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/actpool/actioniterator"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/prometheustimer"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/pkg/util/fileutil"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
)
var (
blockMtc = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "iotex_block_metrics",
Help: "Block metrics.",
},
[]string{"type"},
)
errDelegatesNotExist = errors.New("delegates cannot be found")
)
func init() {
prometheus.MustRegister(blockMtc)
}
// Blockchain represents the blockchain data structure and hosts the APIs to access it
type Blockchain interface {
lifecycle.StartStopper
// Balance returns balance of an account
Balance(addr string) (*big.Int, error)
// Nonce returns the nonce if the account exists
Nonce(addr string) (uint64, error)
// CreateState adds a new account with initial balance to the factory
CreateState(addr string, init *big.Int) (*state.Account, error)
// CandidatesByHeight returns the candidate list by a given height
CandidatesByHeight(height uint64) ([]*state.Candidate, error)
// ProductivityByEpoch returns the number of produced blocks per delegate in an epoch
ProductivityByEpoch(epochNum uint64) (uint64, map[string]uint64, error)
// For exposing blockchain states
// GetHeightByHash returns Block's height by hash
GetHeightByHash(h hash.Hash256) (uint64, error)
// GetHashByHeight returns Block's hash by height
GetHashByHeight(height uint64) (hash.Hash256, error)
// GetBlockByHeight returns Block by height
GetBlockByHeight(height uint64) (*block.Block, error)
// GetBlockByHash returns Block by hash
GetBlockByHash(h hash.Hash256) (*block.Block, error)
// BlockHeaderByHeight return block header by height
BlockHeaderByHeight(height uint64) (*block.Header, error)
// BlockHeaderByHash return block header by hash
BlockHeaderByHash(h hash.Hash256) (*block.Header, error)
// BlockFooterByHeight return block footer by height
BlockFooterByHeight(height uint64) (*block.Footer, error)
// BlockFooterByHash return block footer by hash
BlockFooterByHash(h hash.Hash256) (*block.Footer, error)
// GetTotalActions returns the total number of actions
GetTotalActions() (uint64, error)
// GetReceiptByActionHash returns the receipt by action hash
GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error)
// GetActionsFromAddress returns actions from address
GetActionsFromAddress(address string) ([]hash.Hash256, error)
// GetActionsToAddress returns actions to address
GetActionsToAddress(address string) ([]hash.Hash256, error)
// GetActionCountByAddress returns action count by address
GetActionCountByAddress(address string) (uint64, error)
// GetActionByActionHash returns action by action hash
GetActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error)
// GetBlockHashByActionHash returns Block hash by action hash
GetBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error)
// GetFactory returns the state factory
GetFactory() factory.Factory
// GetChainID returns the chain ID
ChainID() uint32
	// ChainAddress returns the chain address on the parent chain; the root chain returns empty.
ChainAddress() string
// TipHash returns tip block's hash
TipHash() hash.Hash256
// TipHeight returns tip block's height
TipHeight() uint64
// StateByAddr returns account of a given address
StateByAddr(address string) (*state.Account, error)
// RecoverChainAndState recovers the chain to target height and refresh state db if necessary
RecoverChainAndState(targetHeight uint64) error
// GenesisTimestamp returns the timestamp of genesis
GenesisTimestamp() int64
// For block operations
// MintNewBlock creates a new block with given actions
// Note: the coinbase transfer will be added to the given transfers when minting a new block
MintNewBlock(
actionMap map[string][]action.SealedEnvelope,
timestamp time.Time,
) (*block.Block, error)
// CommitBlock validates and appends a block to the chain
CommitBlock(blk *block.Block) error
// ValidateBlock validates a new block before adding it to the blockchain
ValidateBlock(blk *block.Block) error
// For action operations
// Validator returns the current validator object
Validator() Validator
// SetValidator sets the current validator object
SetValidator(val Validator)
// For smart contract operations
// ExecuteContractRead runs a read-only smart contract operation, this is done off the network since it does not
// cause any state change
ExecuteContractRead(caller address.Address, ex *action.Execution) ([]byte, *action.Receipt, error)
// AddSubscriber make you listen to every single produced block
AddSubscriber(BlockCreationSubscriber) error
// RemoveSubscriber make you listen to every single produced block
RemoveSubscriber(BlockCreationSubscriber) error
}
// blockchain implements the Blockchain interface
type blockchain struct {
mu sync.RWMutex // mutex to protect utk, tipHeight and tipHash
dao *blockDAO
config config.Config
tipHeight uint64
tipHash hash.Hash256
validator Validator
lifecycle lifecycle.Lifecycle
clk clock.Clock
blocklistener []BlockCreationSubscriber
timerFactory *prometheustimer.TimerFactory
// used by account-based model
sf factory.Factory
registry *protocol.Registry
enableExperimentalActions bool
}
// Option sets blockchain construction parameter
type Option func(*blockchain, config.Config) error
// DefaultStateFactoryOption sets blockchain's sf from config
func DefaultStateFactoryOption() Option {
return func(bc *blockchain, cfg config.Config) (err error) {
if cfg.Chain.EnableTrielessStateDB {
bc.sf, err = factory.NewStateDB(cfg, factory.DefaultStateDBOption())
} else {
bc.sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption())
}
if err != nil {
return errors.Wrapf(err, "Failed to create state factory")
}
return nil
}
}
// PrecreatedStateFactoryOption sets blockchain's state.Factory to sf
func PrecreatedStateFactoryOption(sf factory.Factory) Option {
return func(bc *blockchain, conf config.Config) error {
bc.sf = sf
return nil
}
}
// InMemStateFactoryOption sets blockchain's factory.Factory as in memory sf
func InMemStateFactoryOption() Option {
return func(bc *blockchain, cfg config.Config) error {
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
if err != nil {
return errors.Wrapf(err, "Failed to create state factory")
}
bc.sf = sf
return nil
}
}
// PrecreatedDaoOption sets blockchain's dao
func PrecreatedDaoOption(dao *blockDAO) Option {
return func(bc *blockchain, conf config.Config) error {
bc.dao = dao
return nil
}
}
// BoltDBDaoOption sets blockchain's dao with BoltDB from config.Chain.ChainDBPath
func BoltDBDaoOption() Option {
return func(bc *blockchain, cfg config.Config) error {
cfg.DB.DbPath = cfg.Chain.ChainDBPath // TODO: remove this after moving TrieDBPath from cfg.Chain to cfg.DB
_, gateway := cfg.Plugins[config.GatewayPlugin]
bc.dao = newBlockDAO(
db.NewOnDiskDB(cfg.DB),
gateway && !cfg.Chain.EnableAsyncIndexWrite,
cfg.Chain.CompressBlock,
cfg.Chain.MaxCacheSize,
)
return nil
}
}
// InMemDaoOption sets blockchain's dao with MemKVStore
func InMemDaoOption() Option {
return func(bc *blockchain, cfg config.Config) error {
_, gateway := cfg.Plugins[config.GatewayPlugin]
bc.dao = newBlockDAO(
db.NewMemKVStore(),
gateway && !cfg.Chain.EnableAsyncIndexWrite,
cfg.Chain.CompressBlock,
cfg.Chain.MaxCacheSize,
)
return nil
}
}
// ClockOption overrides the default clock
func ClockOption(clk clock.Clock) Option {
return func(bc *blockchain, conf config.Config) error {
bc.clk = clk
return nil
}
}
// RegistryOption sets the blockchain with the protocol registry
func RegistryOption(registry *protocol.Registry) Option {
return func(bc *blockchain, conf config.Config) error {
bc.registry = registry
return nil
}
}
// EnableExperimentalActions enables the blockchain to process experimental actions
func EnableExperimentalActions() Option {
return func(bc *blockchain, conf config.Config) error {
bc.enableExperimentalActions = true
return nil
}
}
// NewBlockchain creates a new blockchain and DB instance
func NewBlockchain(cfg config.Config, opts ...Option) Blockchain {
// create the Blockchain
chain := &blockchain{
config: cfg,
clk: clock.New(),
}
for _, opt := range opts {
if err := opt(chain, cfg); err != nil {
log.S().Panicf("Failed to execute blockchain creation option %p: %v", opt, err)
}
}
timerFactory, err := prometheustimer.New(
"iotex_blockchain_perf",
"Performance of blockchain module",
[]string{"topic", "chainID"},
[]string{"default", strconv.FormatUint(uint64(cfg.Chain.ID), 10)},
)
if err != nil {
log.L().Panic("Failed to generate prometheus timer factory.", zap.Error(err))
}
chain.timerFactory = timerFactory
// Set block validator
if err != nil {
log.L().Panic("Failed to get block producer address.", zap.Error(err))
}
chain.validator = &validator{
sf: chain.sf,
validatorAddr: cfg.ProducerAddress().String(),
enableExperimentalActions: chain.enableExperimentalActions,
}
if chain.dao != nil {
chain.lifecycle.Add(chain.dao)
}
if chain.sf != nil {
chain.lifecycle.Add(chain.sf)
}
return chain
}
func (bc *blockchain) ChainID() uint32 {
return atomic.LoadUint32(&bc.config.Chain.ID)
}
func (bc *blockchain) ChainAddress() string {
return bc.config.Chain.Address
}
// Start starts the blockchain
func (bc *blockchain) Start(ctx context.Context) (err error) {
bc.mu.Lock()
defer bc.mu.Unlock()
if err = bc.lifecycle.OnStart(ctx); err != nil {
return err
}
// get blockchain tip height
if bc.tipHeight, err = bc.dao.getBlockchainHeight(); err != nil {
return err
}
if bc.tipHeight == 0 {
return bc.startEmptyBlockchain()
}
// get blockchain tip hash
if bc.tipHash, err = bc.dao.getBlockHash(bc.tipHeight); err != nil {
return err
}
return bc.startExistingBlockchain()
}
// Stop stops the blockchain.
func (bc *blockchain) Stop(ctx context.Context) error {
bc.mu.Lock()
defer bc.mu.Unlock()
return bc.lifecycle.OnStop(ctx)
}
// Balance returns balance of address
func (bc *blockchain) Balance(addr string) (*big.Int, error) {
return bc.sf.Balance(addr)
}
// Nonce returns the nonce if the account exists
func (bc *blockchain) Nonce(addr string) (uint64, error) {
return bc.sf.Nonce(addr)
}
// CandidatesByHeight returns the candidate list by a given height
func (bc *blockchain) CandidatesByHeight(height uint64) ([]*state.Candidate, error) {
return bc.candidatesByHeight(height)
}
// ProductivityByEpoch returns the map of the number of blocks produced per delegate in an epoch
func (bc *blockchain) ProductivityByEpoch(epochNum uint64) (uint64, map[string]uint64, error) {
p, ok := bc.registry.Find(rolldpos.ProtocolID)
if !ok {
return 0, nil, errors.New("rolldpos protocol is not registered")
}
rp, ok := p.(*rolldpos.Protocol)
if !ok {
return 0, nil, errors.New("fail to cast rolldpos protocol")
}
var isCurrentEpoch bool
currentEpochNum := rp.GetEpochNum(bc.tipHeight)
if epochNum > currentEpochNum {
return 0, nil, errors.New("epoch number is larger than current epoch number")
}
if epochNum == currentEpochNum {
isCurrentEpoch = true
}
epochStartHeight := rp.GetEpochHeight(epochNum)
var epochEndHeight uint64
if isCurrentEpoch {
epochEndHeight = bc.tipHeight
} else {
epochEndHeight = rp.GetEpochLastBlockHeight(epochNum)
}
numBlks := epochEndHeight - epochStartHeight + 1
p, ok = bc.registry.Find(poll.ProtocolID)
if !ok {
return 0, nil, errors.New("poll protocol is not registered")
}
ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{
BlockHeight: bc.tipHeight,
Registry: bc.registry,
})
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return 0, nil, err
}
s, err := p.ReadState(ctx, ws, []byte("ActiveBlockProducersByEpoch"),
byteutil.Uint64ToBytes(epochNum))
if err != nil {
return 0, nil, status.Error(codes.NotFound, err.Error())
}
var activeConsensusBlockProducers state.CandidateList
if err := activeConsensusBlockProducers.Deserialize(s); err != nil {
return 0, nil, err
}
produce := make(map[string]uint64)
for _, bp := range activeConsensusBlockProducers {
produce[bp.Address] = 0
}
for i := uint64(0); i < numBlks; i++ {
blk, err := bc.blockHeaderByHeight(epochStartHeight + i)
if err != nil {
return 0, nil, err
}
produce[blk.ProducerAddress()]++
}
return numBlks, produce, nil
}
// GetHeightByHash returns block's height by hash
func (bc *blockchain) GetHeightByHash(h hash.Hash256) (uint64, error) {
return bc.dao.getBlockHeight(h)
}
// GetHashByHeight returns block's hash by height
func (bc *blockchain) GetHashByHeight(height uint64) (hash.Hash256, error) {
return bc.dao.getBlockHash(height)
}
// GetBlockByHeight returns block from the blockchain hash by height
func (bc *blockchain) GetBlockByHeight(height uint64) (*block.Block, error) {
blk, err := bc.getBlockByHeight(height)
if blk == nil || err != nil {
return blk, err
}
blk.HeaderLogger(log.L()).Debug("Get block.")
return blk, err
}
// GetBlockByHash returns block from the blockchain hash by hash
func (bc *blockchain) GetBlockByHash(h hash.Hash256) (*block.Block, error) {
return bc.dao.getBlock(h)
}
func (bc *blockchain) BlockHeaderByHeight(height uint64) (*block.Header, error) {
return bc.blockHeaderByHeight(height)
}
func (bc *blockchain) BlockHeaderByHash(h hash.Hash256) (*block.Header, error) {
return bc.dao.Header(h)
}
func (bc *blockchain) BlockFooterByHeight(height uint64) (*block.Footer, error) {
return bc.blockFooterByHeight(height)
}
func (bc *blockchain) BlockFooterByHash(h hash.Hash256) (*block.Footer, error) {
return bc.dao.Footer(h)
}
// GetTotalActions returns the total number of actions
func (bc *blockchain) GetTotalActions() (uint64, error) {
return bc.dao.getTotalActions()
}
// GetReceiptByActionHash returns the receipt by action hash
func (bc *blockchain) GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) {
return bc.dao.getReceiptByActionHash(h)
}
// GetActionsFromAddress returns actions from address
func (bc *blockchain) GetActionsFromAddress(addrStr string) ([]hash.Hash256, error) {
addr, err := address.FromString(addrStr)
if err != nil {
return nil, err
}
return getActionsBySenderAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
}
// GetActionsToAddress returns actions to address
func (bc *blockchain) GetActionsToAddress(addrStr string) ([]hash.Hash256, error) {
addr, err := address.FromString(addrStr)
if err != nil {
return nil, err
}
return getActionsByRecipientAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
}
// GetActionCountByAddress returns action count by address
func (bc *blockchain) GetActionCountByAddress(addrStr string) (uint64, error) {
addr, err := address.FromString(addrStr)
if err != nil {
return 0, err
}
fromCount, err := getActionCountBySenderAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
if err != nil {
return 0, err
}
toCount, err := getActionCountByRecipientAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
if err != nil {
return 0, err
}
return fromCount + toCount, nil
}
func (bc *blockchain) getActionByActionHashHelper(h hash.Hash256) (hash.Hash256, error) {
return getBlockHashByActionHash(bc.dao.kvstore, h)
}
// GetActionByActionHash returns action by action hash
func (bc *blockchain) GetActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error) {
blkHash, err := bc.getActionByActionHashHelper(h)
if err != nil {
return action.SealedEnvelope{}, err
}
blk, err := bc.dao.getBlock(blkHash)
if err != nil {
return action.SealedEnvelope{}, err
}
for _, act := range blk.Actions {
if act.Hash() == h {
return act, nil
}
}
return action.SealedEnvelope{}, errors.Errorf("block %x does not have transfer %x", blkHash, h)
}
// GetBlockHashByActionHash returns Block hash by action hash
func (bc *blockchain) GetBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) {
return getBlockHashByActionHash(bc.dao.kvstore, h)
}
// GetFactory returns the state factory
func (bc *blockchain) GetFactory() factory.Factory {
return bc.sf
}
// TipHash returns tip block's hash
func (bc *blockchain) TipHash() hash.Hash256 {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.tipHash
}
// TipHeight returns tip block's height
func (bc *blockchain) TipHeight() uint64 {
return atomic.LoadUint64(&bc.tipHeight)
}
// ValidateBlock validates a new block before adding it to the blockchain
func (bc *blockchain) ValidateBlock(blk *block.Block) error {
bc.mu.RLock()
defer bc.mu.RUnlock()
timer := bc.timerFactory.NewTimer("ValidateBlock")
defer timer.End()
return bc.validateBlock(blk)
}
func (bc *blockchain) MintNewBlock(
actionMap map[string][]action.SealedEnvelope,
timestamp time.Time,
) (*block.Block, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
mintNewBlockTimer := bc.timerFactory.NewTimer("MintNewBlock")
defer mintNewBlockTimer.End()
newblockHeight := bc.tipHeight + 1
// run execution and update state trie root hash
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, errors.Wrap(err, "Failed to obtain working set from state factory")
}
gasLimitForContext := bc.config.Genesis.BlockGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
BlockHeight: newblockHeight,
BlockTimeStamp: timestamp,
Producer: bc.config.ProducerAddress(),
GasLimit: gasLimitForContext,
ActionGasLimit: bc.config.Genesis.ActionGasLimit,
Registry: bc.registry,
})
_, rc, actions, err := bc.pickAndRunActions(ctx, actionMap, ws)
if err != nil {
return nil, errors.Wrapf(err, "Failed to update state changes in new block %d", newblockHeight)
}
blockMtc.WithLabelValues("numActions").Set(float64(len(actions)))
sk := bc.config.ProducerPrivateKey()
ra := block.NewRunnableActionsBuilder().
SetHeight(newblockHeight).
SetTimeStamp(timestamp).
AddActions(actions...).
Build(sk.PublicKey())
prevBlkHash := bc.tipHash
// The first block's previous block hash is pointing to the digest of genesis config. This is to guarantee all nodes
// could verify that they start from the same genesis
if newblockHeight == 1 {
prevBlkHash = bc.config.Genesis.Hash()
}
blk, err := block.NewBuilder(ra).
SetPrevBlockHash(prevBlkHash).
SetDeltaStateDigest(ws.Digest()).
SetReceipts(rc).
SetReceiptRoot(calculateReceiptRoot(rc)).
SignAndBuild(sk)
if err != nil {
return nil, errors.Wrapf(err, "failed to create block")
}
blk.WorkingSet = ws
return &blk, nil
}
// CommitBlock validates and appends a block to the chain
func (bc *blockchain) CommitBlock(blk *block.Block) error {
bc.mu.Lock()
defer bc.mu.Unlock()
timer := bc.timerFactory.NewTimer("CommitBlock")
defer timer.End()
return bc.commitBlock(blk)
}
// StateByAddr returns the account of an address
func (bc *blockchain) StateByAddr(address string) (*state.Account, error) {
if bc.sf != nil {
s, err := bc.sf.AccountState(address)
if err != nil {
log.L().Warn("Failed to get account.", zap.String("address", address), zap.Error(err))
return nil, errors.New("account does not exist")
}
return s, nil
}
return nil, errors.New("state factory is nil")
}
// SetValidator sets the current validator object
func (bc *blockchain) SetValidator(val Validator) {
bc.mu.Lock()
defer bc.mu.Unlock()
bc.validator = val
}
// Validator gets the current validator object
func (bc *blockchain) Validator() Validator {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.validator
}
func (bc *blockchain) AddSubscriber(s BlockCreationSubscriber) error {
bc.mu.Lock()
defer bc.mu.Unlock()
log.L().Info("Add a subscriber.")
if s == nil {
return errors.New("subscriber could not be nil")
}
bc.blocklistener = append(bc.blocklistener, s)
return nil
}
func (bc *blockchain) RemoveSubscriber(s BlockCreationSubscriber) error {
bc.mu.Lock()
defer bc.mu.Unlock()
for i, sub := range bc.blocklistener {
if sub == s {
bc.blocklistener = append(bc.blocklistener[:i], bc.blocklistener[i+1:]...)
log.L().Info("Successfully unsubscribe block creation.")
return nil
}
}
return errors.New("cannot find subscription")
}
//======================================
// internal functions
//=====================================
// ExecuteContractRead runs a read-only smart contract operation, this is done off the network since it does not
// cause any state change
func (bc *blockchain) ExecuteContractRead(caller address.Address, ex *action.Execution) ([]byte, *action.Receipt, error) {
// use latest block as carrier to run the offline execution
// the block itself is not used
h := bc.TipHeight()
header, err := bc.BlockHeaderByHeight(h)
if err != nil {
return nil, nil, errors.Wrap(err, "failed to get block in ExecuteContractRead")
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, nil, errors.Wrap(err, "failed to obtain working set from state factory")
}
producer, err := address.FromString(header.ProducerAddress())
if err != nil {
return nil, nil, err
}
gasLimit := bc.config.Genesis.BlockGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{
BlockHeight: header.Height(),
BlockTimeStamp: header.Timestamp(),
Producer: producer,
Caller: caller,
GasLimit: gasLimit,
ActionGasLimit: bc.config.Genesis.ActionGasLimit,
GasPrice: big.NewInt(0),
IntrinsicGas: 0,
})
return evm.ExecuteContract(
ctx,
ws,
ex,
bc,
)
}
// CreateState adds a new account with initial balance to the factory
func (bc *blockchain) CreateState(addr string, init *big.Int) (*state.Account, error) {
if bc.sf == nil {
return nil, errors.New("empty state factory")
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, errors.Wrapf(err, "failed to create clean working set")
}
account, err := accountutil.LoadOrCreateAccount(ws, addr, init)
if err != nil {
return nil, errors.Wrapf(err, "failed to create new account %s", addr)
}
gasLimit := bc.config.Genesis.BlockGasLimit
callerAddr, err := address.FromString(addr)
if err != nil {
return nil, err
}
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
GasLimit: gasLimit,
ActionGasLimit: bc.config.Genesis.ActionGasLimit,
Caller: callerAddr,
ActionHash: hash.ZeroHash256,
Nonce: 0,
Registry: bc.registry,
})
if _, err = ws.RunActions(ctx, 0, nil); err != nil {
return nil, errors.Wrap(err, "failed to run the account creation")
}
if err = bc.sf.Commit(ws); err != nil {
return nil, errors.Wrap(err, "failed to commit the account creation")
}
return account, nil
}
// RecoverChainAndState recovers the chain to target height and refresh state db if necessary
func (bc *blockchain) RecoverChainAndState(targetHeight uint64) error {
var buildStateFromScratch bool
stateHeight, err := bc.sf.Height()
if err != nil {
buildStateFromScratch = true
}
if targetHeight > 0 {
if err := bc.recoverToHeight(targetHeight); err != nil {
return errors.Wrapf(err, "failed to recover blockchain to target height %d", targetHeight)
}
if stateHeight > bc.tipHeight {
buildStateFromScratch = true
}
}
if buildStateFromScratch {
return bc.refreshStateDB()
}
return nil
}
func (bc *blockchain) GenesisTimestamp() int64 {
return bc.config.Genesis.Timestamp
}
//======================================
// private functions
//=====================================
func (bc *blockchain) protocol(id string) (protocol.Protocol, bool) {
if bc.registry == nil {
return nil, false
}
return bc.registry.Find(id)
}
func (bc *blockchain) mustGetRollDPoSProtocol() *rolldpos.Protocol {
p, ok := bc.protocol(rolldpos.ProtocolID)
if !ok {
log.L().Panic("protocol rolldpos has not been registered")
}
rp, ok := p.(*rolldpos.Protocol)
if !ok {
log.L().Panic("failed to cast to rolldpos protocol")
}
return rp
}
func (bc *blockchain) candidatesByHeight(height uint64) (state.CandidateList, error) {
if bc.config.Genesis.EnableGravityChainVoting {
rp := bc.mustGetRollDPoSProtocol()
return bc.sf.CandidatesByHeight(rp.GetEpochHeight(rp.GetEpochNum(height)))
}
for {
candidates, err := bc.sf.CandidatesByHeight(height)
if err == nil {
return candidates, nil
}
if height == 0 {
return nil, err
}
height--
}
}
func (bc *blockchain) getBlockByHeight(height uint64) (*block.Block, error) {
hash, err := bc.dao.getBlockHash(height)
if err != nil {
return nil, err
}
return bc.dao.getBlock(hash)
}
func (bc *blockchain) blockHeaderByHeight(height uint64) (*block.Header, error) {
hash, err := bc.dao.getBlockHash(height)
if err != nil {
return nil, err
}
return bc.dao.Header(hash)
}
func (bc *blockchain) blockFooterByHeight(height uint64) (*block.Footer, error) {
hash, err := bc.dao.getBlockHash(height)
if err != nil {
return nil, err
}
return bc.dao.Footer(hash)
}
func (bc *blockchain) startEmptyBlockchain() error {
var ws factory.WorkingSet
var err error
if ws, err = bc.sf.NewWorkingSet(); err != nil {
return errors.Wrap(err, "failed to obtain working set from state factory")
}
if !bc.config.Chain.EmptyGenesis {
// Initialize the states before any actions happen on the blockchain
if err := bc.createGenesisStates(ws); err != nil {
return err
}
_ = ws.UpdateBlockLevelInfo(0)
}
// add Genesis states
if err := bc.sf.Commit(ws); err != nil {
return errors.Wrap(err, "failed to commit Genesis states")
}
return nil
}
func (bc *blockchain) startExistingBlockchain() error {
if bc.sf == nil {
return errors.New("statefactory cannot be nil")
}
stateHeight, err := bc.sf.Height()
if err != nil {
return err
}
if stateHeight > bc.tipHeight {
return errors.New("factory is higher than blockchain")
}
for i := stateHeight + 1; i <= bc.tipHeight; i++ {
blk, err := bc.getBlockByHeight(i)
if err != nil {
return err
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return errors.Wrap(err, "failed to obtain working set from state factory")
}
if _, err := bc.runActions(blk.RunnableActions(), ws); err != nil {
return err
}
if err := bc.sf.Commit(ws); err != nil {
return err
}
}
stateHeight, err = bc.sf.Height()
if err != nil {
return errors.Wrap(err, "failed to get factory's height")
}
log.L().Info("Restarting blockchain.",
zap.Uint64("chainHeight",
bc.tipHeight),
zap.Uint64("factoryHeight", stateHeight))
return nil
}
func (bc *blockchain) validateBlock(blk *block.Block) error {
validateTimer := bc.timerFactory.NewTimer("validate")
prevBlkHash := bc.tipHash
if blk.Height() == 1 {
prevBlkHash = bc.config.Genesis.Hash()
}
err := bc.validator.Validate(blk, bc.tipHeight, prevBlkHash)
validateTimer.End()
if err != nil {
return errors.Wrapf(err, "error when validating block %d", blk.Height())
}
// run actions and update state factory
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return errors.Wrap(err, "Failed to obtain working set from state factory")
}
runTimer := bc.timerFactory.NewTimer("runActions")
receipts, err := bc.runActions(blk.RunnableActions(), ws)
runTimer.End()
if err != nil {
log.L().Panic("Failed to update state.", zap.Uint64("tipHeight", bc.tipHeight), zap.Error(err))
}
if err = blk.VerifyDeltaStateDigest(ws.Digest()); err != nil {
return err
}
if err = blk.VerifyReceiptRoot(calculateReceiptRoot(receipts)); err != nil {
return errors.Wrap(err, "Failed to verify receipt root")
}
blk.Receipts = receipts
// attach working set to be committed to state factory
blk.WorkingSet = ws
return nil
}
// commitBlock commits a block to the chain
func (bc *blockchain) commitBlock(blk *block.Block) error {
	// Check if it already exists, and return early if so
blkHash, err := bc.dao.getBlockHash(blk.Height())
if blkHash != hash.ZeroHash256 {
log.L().Debug("Block already exists.", zap.Uint64("height", blk.Height()))
return nil
}
	// If it's a real db io error, return early with the error
if errors.Cause(err) != db.ErrNotExist {
return err
}
// write block into DB
putTimer := bc.timerFactory.NewTimer("putBlock")
err = bc.dao.putBlock(blk)
putTimer.End()
if err != nil {
return err
}
// update tip hash and height
atomic.StoreUint64(&bc.tipHeight, blk.Height())
bc.tipHash = blk.HashBlock()
if bc.sf != nil {
sfTimer := bc.timerFactory.NewTimer("sf.Commit")
err := bc.sf.Commit(blk.WorkingSet)
sfTimer.End()
// detach working set so it can be freed by GC
blk.WorkingSet = nil
if err != nil {
log.L().Panic("Error when committing states.", zap.Error(err))
}
// write smart contract receipt into DB
receiptTimer := bc.timerFactory.NewTimer("putReceipt")
err = bc.dao.putReceipts(blk.Height(), blk.Receipts)
receiptTimer.End()
if err != nil {
return errors.Wrapf(err, "failed to put smart contract receipts into DB on height %d", blk.Height())
}
}
blk.HeaderLogger(log.L()).Info("Committed a block.", log.Hex("tipHash", bc.tipHash[:]))
// emit block to all block subscribers
bc.emitToSubscribers(blk)
return nil
}
func (bc *blockchain) runActions(
acts block.RunnableActions,
ws factory.WorkingSet,
) ([]*action.Receipt, error) {
if bc.sf == nil {
return nil, errors.New("statefactory cannot be nil")
}
gasLimit := bc.config.Genesis.BlockGasLimit
// update state factory
producer, err := address.FromBytes(acts.BlockProducerPubKey().Hash())
if err != nil {
return nil, err
}
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
BlockHeight: acts.BlockHeight(),
BlockTimeStamp: acts.BlockTimeStamp(),
Producer: producer,
GasLimit: gasLimit,
ActionGasLimit: bc.config.Genesis.ActionGasLimit,
Registry: bc.registry,
})
return ws.RunActions(ctx, acts.BlockHeight(), acts.Actions())
}
func (bc *blockchain) pickAndRunActions(ctx context.Context, actionMap map[string][]action.SealedEnvelope,
ws factory.WorkingSet) (hash.Hash256, []*action.Receipt, []action.SealedEnvelope, error) {
if bc.sf == nil {
return hash.ZeroHash256, nil, nil, errors.New("statefactory cannot be nil")
}
receipts := make([]*action.Receipt, 0)
executedActions := make([]action.SealedEnvelope, 0)
raCtx := protocol.MustGetRunActionsCtx(ctx)
// initial action iterator
actionIterator := actioniterator.NewActionIterator(actionMap)
for {
nextAction, ok := actionIterator.Next()
if !ok {
break
}
receipt, err := ws.RunAction(raCtx, nextAction)
if err != nil {
if errors.Cause(err) == action.ErrHitGasLimit {
				// hit block gas limit, we should not process actions belonging to this user anymore since we
				// need a monotonically increasing nonce. But we can continue processing other actions
				// that belong to other users
actionIterator.PopAccount()
continue
}
return hash.ZeroHash256, nil, nil, errors.Wrapf(err, "Failed to update state changes for selp %x", nextAction.Hash())
}
if receipt != nil {
raCtx.GasLimit -= receipt.GasConsumed
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, nextAction)
// To prevent loop all actions in act_pool, we stop processing action when remaining gas is below
// than certain threshold
if raCtx.GasLimit < bc.config.Chain.AllowedBlockGasResidue {
break
}
}
rp := bc.mustGetRollDPoSProtocol()
epochNum := rp.GetEpochNum(raCtx.BlockHeight)
lastBlkHeight := rp.GetEpochLastBlockHeight(epochNum)
// generate delegates for next round
skip, putPollResult, err := bc.createPutPollResultAction(raCtx.BlockHeight)
switch errors.Cause(err) {
case nil:
if !skip {
receipt, err := ws.RunAction(raCtx, putPollResult)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, putPollResult)
}
case errDelegatesNotExist:
if raCtx.BlockHeight == lastBlkHeight {
			// TODO (zhi): if some bp bypasses this condition, we need to reject the block in the validation step
return hash.ZeroHash256, nil, nil, errors.Wrapf(
err,
"failed to prepare delegates for next epoch %d",
epochNum+1,
)
}
default:
return hash.ZeroHash256, nil, nil, err
}
// Process grant block reward action
grant, err := bc.createGrantRewardAction(action.BlockReward, raCtx.BlockHeight)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
receipt, err := ws.RunAction(raCtx, grant)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, grant)
// Process grant epoch reward action if the block is the last one in an epoch
if raCtx.BlockHeight == lastBlkHeight {
grant, err = bc.createGrantRewardAction(action.EpochReward, raCtx.BlockHeight)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
receipt, err = ws.RunAction(raCtx, grant)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, grant)
}
blockMtc.WithLabelValues("gasConsumed").Set(float64(bc.config.Genesis.BlockGasLimit - raCtx.GasLimit))
return ws.UpdateBlockLevelInfo(raCtx.BlockHeight), receipts, executedActions, nil
}
func (bc *blockchain) createPutPollResultAction(height uint64) (skip bool, se action.SealedEnvelope, err error) {
skip = true
if !bc.config.Genesis.EnableGravityChainVoting {
return
}
pl, ok := bc.protocol(poll.ProtocolID)
if !ok {
log.L().Panic("protocol poll has not been registered")
}
pp, ok := pl.(poll.Protocol)
if !ok {
log.L().Panic("Failed to cast to poll.Protocol")
}
rp := bc.mustGetRollDPoSProtocol()
epochNum := rp.GetEpochNum(height)
epochHeight := rp.GetEpochHeight(epochNum)
nextEpochHeight := rp.GetEpochHeight(epochNum + 1)
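	// Only attempt to generate the poll result in the second half of the current epoch.
	// For example (illustrative numbers only): with epochHeight = 1000 and nextEpochHeight = 2000,
	// any height below 1500 skips the put-poll-result action.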
if height < epochHeight+(nextEpochHeight-epochHeight)/2 {
return
}
log.L().Debug(
"createPutPollResultAction",
zap.Uint64("height", height),
zap.Uint64("epochNum", epochNum),
zap.Uint64("epochHeight", epochHeight),
zap.Uint64("nextEpochHeight", nextEpochHeight),
)
_, err = bc.candidatesByHeight(nextEpochHeight)
switch errors.Cause(err) {
case nil:
return
case state.ErrStateNotExist:
skip = false
default:
return
}
l, err := pp.DelegatesByHeight(epochHeight)
switch errors.Cause(err) {
case nil:
if len(l) == 0 {
err = errors.Wrapf(
errDelegatesNotExist,
"failed to fetch delegates by epoch height %d, empty list",
epochHeight,
)
return
}
case db.ErrNotExist:
err = errors.Wrapf(
errDelegatesNotExist,
"failed to fetch delegates by epoch height %d, original error %v",
epochHeight,
err,
)
return
default:
return
}
sk := bc.config.ProducerPrivateKey()
nonce := uint64(0)
pollAction := action.NewPutPollResult(nonce, nextEpochHeight, l)
builder := action.EnvelopeBuilder{}
se, err = action.Sign(builder.SetNonce(nonce).SetAction(pollAction).Build(), sk)
return skip, se, err
}
func (bc *blockchain) emitToSubscribers(blk *block.Block) {
if bc.blocklistener == nil {
return
}
for _, s := range bc.blocklistener {
go func(bcs BlockCreationSubscriber, b *block.Block) {
if err := bcs.HandleBlock(b); err != nil {
log.L().Error("Failed to handle new block.", zap.Error(err))
}
}(s, blk)
}
}
// recoverToHeight recovers the blockchain to the target height
func (bc *blockchain) recoverToHeight(targetHeight uint64) error {
for bc.tipHeight > targetHeight {
if err := bc.dao.deleteTipBlock(); err != nil {
return err
}
bc.tipHeight--
}
return nil
}
// refreshStateDB deletes the existing state DB and creates a new one with state changes from the genesis block
func (bc *blockchain) refreshStateDB() error {
// Delete existing state DB and reinitialize it
if fileutil.FileExists(bc.config.Chain.TrieDBPath) && os.Remove(bc.config.Chain.TrieDBPath) != nil {
return errors.New("failed to delete existing state DB")
}
if err := DefaultStateFactoryOption()(bc, bc.config); err != nil {
return errors.Wrap(err, "failed to reinitialize state DB")
}
for _, p := range bc.registry.All() {
bc.sf.AddActionHandlers(p)
}
if err := bc.sf.Start(context.Background()); err != nil {
return errors.Wrap(err, "failed to start state factory")
}
if err := bc.startEmptyBlockchain(); err != nil {
return err
}
if err := bc.sf.Stop(context.Background()); err != nil {
return errors.Wrap(err, "failed to stop state factory")
}
return nil
}
func (bc *blockchain) createGrantRewardAction(rewardType int, height uint64) (action.SealedEnvelope, error) {
gb := action.GrantRewardBuilder{}
grant := gb.SetRewardType(rewardType).SetHeight(height).Build()
eb := action.EnvelopeBuilder{}
envelope := eb.SetNonce(0).
SetGasPrice(big.NewInt(0)).
SetGasLimit(grant.GasLimit()).
SetAction(&grant).
Build()
sk := bc.config.ProducerPrivateKey()
return action.Sign(envelope, sk)
}
func (bc *blockchain) createGenesisStates(ws factory.WorkingSet) error {
if bc.registry == nil {
		// TODO: return nil to avoid test cases failing due to a missing rewarding protocol
return nil
}
ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{
BlockHeight: 0,
BlockTimeStamp: time.Unix(bc.config.Genesis.Timestamp, 0),
GasLimit: 0,
ActionGasLimit: bc.config.Genesis.ActionGasLimit,
Producer: nil,
Caller: nil,
ActionHash: hash.ZeroHash256,
Nonce: 0,
Registry: bc.registry,
})
if err := bc.createAccountGenesisStates(ctx, ws); err != nil {
return err
}
if err := bc.createPollGenesisStates(ctx, ws); err != nil {
return err
}
return bc.createRewardingGenesisStates(ctx, ws)
}
func (bc *blockchain) createAccountGenesisStates(ctx context.Context, ws factory.WorkingSet) error {
p, ok := bc.registry.Find(account.ProtocolID)
if !ok {
return nil
}
ap, ok := p.(*account.Protocol)
if !ok {
return errors.Errorf("error when casting protocol")
}
addrs, balances := bc.config.Genesis.InitBalances()
return ap.Initialize(ctx, ws, addrs, balances)
}
func (bc *blockchain) createRewardingGenesisStates(ctx context.Context, ws factory.WorkingSet) error {
p, ok := bc.registry.Find(rewarding.ProtocolID)
if !ok {
return nil
}
rp, ok := p.(*rewarding.Protocol)
if !ok {
return errors.Errorf("error when casting protocol")
}
return rp.Initialize(
ctx,
ws,
bc.config.Genesis.InitBalance(),
bc.config.Genesis.BlockReward(),
bc.config.Genesis.EpochReward(),
bc.config.Genesis.NumDelegatesForEpochReward,
bc.config.Genesis.ExemptAddrsFromEpochReward(),
bc.config.Genesis.FoundationBonus(),
bc.config.Genesis.NumDelegatesForFoundationBonus,
bc.config.Genesis.FoundationBonusLastEpoch,
bc.config.Genesis.ProductivityThreshold,
)
}
func (bc *blockchain) createPollGenesisStates(ctx context.Context, ws factory.WorkingSet) error {
if bc.config.Genesis.EnableGravityChainVoting {
p, ok := bc.protocol(poll.ProtocolID)
if !ok {
return errors.Errorf("protocol %s is not found", poll.ProtocolID)
}
pp, ok := p.(poll.Protocol)
if !ok {
return errors.Errorf("error when casting poll protocol")
}
return pp.Initialize(
ctx,
ws,
)
}
return nil
}
func calculateReceiptRoot(receipts []*action.Receipt) hash.Hash256 {
h := make([]hash.Hash256, 0, len(receipts))
for _, receipt := range receipts {
h = append(h, receipt.Hash())
}
if len(h) == 0 {
return hash.ZeroHash256
}
res := crypto.NewMerkleTree(h).HashTree()
return res
}
| 1 | 17,677 | File is not `goimports`-ed (from `goimports`) | iotexproject-iotex-core | go |
@@ -317,8 +317,16 @@ func (mset *Stream) AddConsumer(config *ConsumerConfig) (*Consumer, error) {
}
}
- // Check for any limits.
- if mset.config.MaxConsumers > 0 && len(mset.consumers) >= mset.config.MaxConsumers {
+	// Check for any limits. If the consumer config sets a limit we check against that,
+	// but if not we use the value from the account limits. If the account limits are more
+	// restrictive than the stream config, we prefer the account limits to handle cases where
+	// account limits are updated during the lifecycle of the stream.
+ maxc := mset.config.MaxConsumers
+ if mset.config.MaxConsumers <= 0 || mset.jsa.limits.MaxConsumers < mset.config.MaxConsumers {
+ maxc = mset.jsa.limits.MaxConsumers
+ }
+
+ if maxc > 0 && len(mset.consumers) >= maxc {
mset.mu.Unlock()
return nil, fmt.Errorf("maximum consumers limit reached")
} | 1 | // Copyright 2019-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"encoding/json"
"fmt"
mrand "math/rand"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/nats-io/nuid"
)
type ConsumerInfo struct {
Stream string `json:"stream_name"`
Name string `json:"name"`
Created time.Time `json:"created"`
Config ConsumerConfig `json:"config"`
Delivered SequencePair `json:"delivered"`
AckFloor SequencePair `json:"ack_floor"`
NumPending int `json:"num_pending"`
NumRedelivered int `json:"num_redelivered"`
}
type ConsumerConfig struct {
Durable string `json:"durable_name,omitempty"`
DeliverSubject string `json:"deliver_subject,omitempty"`
DeliverPolicy DeliverPolicy `json:"deliver_policy"`
OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
OptStartTime *time.Time `json:"opt_start_time,omitempty"`
AckPolicy AckPolicy `json:"ack_policy"`
AckWait time.Duration `json:"ack_wait,omitempty"`
MaxDeliver int `json:"max_deliver,omitempty"`
FilterSubject string `json:"filter_subject,omitempty"`
ReplayPolicy ReplayPolicy `json:"replay_policy"`
SampleFrequency string `json:"sample_freq,omitempty"`
}
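// Illustrative only (not part of the original file): a typical durable push consumer
// configuration using the fields and constants defined in this file; the subject and
// durable name are made-up example values.
//
//	cfg := &ConsumerConfig{
//		Durable:        "ORDERS_PROCESSOR",
//		DeliverSubject: "orders.deliver",
//		DeliverPolicy:  DeliverAll,
//		AckPolicy:      AckExplicit,
//		AckWait:        30 * time.Second,
//		ReplayPolicy:   ReplayInstant,
//	}
//	// cfg would then be passed to (*Stream).AddConsumer(cfg).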
type CreateConsumerRequest struct {
Stream string `json:"stream_name"`
Config ConsumerConfig `json:"config"`
}
// DeliverPolicy determines how the consumer should select the first message to deliver.
type DeliverPolicy int
const (
// DeliverAll will be the default so can be omitted from the request.
DeliverAll DeliverPolicy = iota
// DeliverLast will start the consumer with the last sequence received.
DeliverLast
// DeliverNew will only deliver new messages that are sent after the consumer is created.
DeliverNew
// DeliverByStartSequence will look for a defined starting sequence to start.
DeliverByStartSequence
	// DeliverByStartTime will select the first message with a timestamp >= StartTime
DeliverByStartTime
)
func (dp DeliverPolicy) String() string {
switch dp {
case DeliverAll:
return "all"
case DeliverLast:
return "last"
case DeliverNew:
return "new"
case DeliverByStartSequence:
return "by_start_sequence"
case DeliverByStartTime:
return "by_start_time"
default:
return "undefined"
}
}
// AckPolicy determines how the consumer should acknowledge delivered messages.
type AckPolicy int
const (
// AckNone requires no acks for delivered messages.
AckNone AckPolicy = iota
// AckAll when acking a sequence number, this implicitly acks all sequences below this one as well.
AckAll
// AckExplicit requires ack or nack for all messages.
AckExplicit
)
func (a AckPolicy) String() string {
switch a {
case AckNone:
return "none"
case AckAll:
return "all"
default:
return "explicit"
}
}
// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream.
type ReplayPolicy int
const (
// ReplayInstant will replay messages as fast as possible.
ReplayInstant ReplayPolicy = iota
// ReplayOriginal will maintain the same timing as the messages were received.
ReplayOriginal
)
func (r ReplayPolicy) String() string {
switch r {
case ReplayInstant:
return "instant"
default:
return "original"
}
}
// OK
const OK = "+OK"
// Ack responses. Note that a nil or no payload is same as AckAck
var (
// Ack
AckAck = []byte("+ACK") // nil or no payload to ack subject also means ACK
AckOK = []byte(OK) // deprecated but +OK meant ack as well.
// Nack
AckNak = []byte("-NAK")
// Progress indicator
AckProgress = []byte("+WPI")
// Ack + Deliver the next message(s).
AckNext = []byte("+NXT")
// Terminate delivery of the message.
AckTerm = []byte("+TERM")
)
// Consumer is a jetstream consumer.
type Consumer struct {
mu sync.Mutex
mset *Stream
acc *Account
name string
stream string
sseq uint64
dseq uint64
adflr uint64
asflr uint64
dsubj string
reqSub *subscription
ackSub *subscription
ackReplyT string
nextMsgSubj string
pending map[uint64]int64
ptmr *time.Timer
rdq []uint64
rdc map[uint64]uint64
maxdc uint64
waiting []string
config ConsumerConfig
store ConsumerStore
active bool
replay bool
filterWC bool
dtmr *time.Timer
dthresh time.Duration
fch chan struct{}
qch chan struct{}
inch chan bool
sfreq int32
ackEventT string
deliveryExcEventT string
created time.Time
}
const (
// JsAckWaitDefault is the default AckWait, only applicable on explicit ack policy observables.
JsAckWaitDefault = 30 * time.Second
// JsDeleteWaitTimeDefault is the default amount of time we will wait for non-durable
// observables to be in an inactive state before deleting them.
JsDeleteWaitTimeDefault = 5 * time.Second
)
func (mset *Stream) AddConsumer(config *ConsumerConfig) (*Consumer, error) {
if config == nil {
return nil, fmt.Errorf("consumer config required")
}
var err error
	// For now expect a literal subject if it's not empty. Empty means work queue mode (pull mode).
if config.DeliverSubject != _EMPTY_ {
if !subjectIsLiteral(config.DeliverSubject) {
return nil, fmt.Errorf("consumer deliver subject has wildcards")
}
if mset.deliveryFormsCycle(config.DeliverSubject) {
return nil, fmt.Errorf("consumer deliver subject forms a cycle")
}
} else {
// Pull mode / work queue mode require explicit ack.
if config.AckPolicy != AckExplicit {
return nil, fmt.Errorf("consumer in pull mode requires explicit ack policy")
}
// They are also required to be durable since otherwise we will not know when to
// clean them up.
if config.Durable == _EMPTY_ {
return nil, fmt.Errorf("consumer in pull mode requires a durable name")
}
}
// Setup proper default for ack wait if we are in explicit ack mode.
if config.AckWait == 0 && (config.AckPolicy == AckExplicit || config.AckPolicy == AckAll) {
config.AckWait = JsAckWaitDefault
}
// Setup default of -1, meaning no limit for MaxDeliver.
if config.MaxDeliver == 0 {
config.MaxDeliver = -1
}
// Make sure any partition subject is also a literal.
if config.FilterSubject != "" {
// Make sure this is a valid partition of the interest subjects.
if !mset.validSubject(config.FilterSubject) {
return nil, fmt.Errorf("consumer filter subject is not a valid subset of the interest subjects")
}
if config.AckPolicy == AckAll {
return nil, fmt.Errorf("consumer with filter subject can not have an ack policy of ack all")
}
}
// Check on start position conflicts.
switch config.DeliverPolicy {
case DeliverAll:
if config.OptStartSeq > 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver all, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver all, but optional start time is also set")
}
case DeliverLast:
if config.OptStartSeq > 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver last, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver last, but optional start time is also set")
}
case DeliverNew:
if config.OptStartSeq > 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver new, but optional start sequence is also set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver new, but optional start time is also set")
}
case DeliverByStartSequence:
if config.OptStartSeq == 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver by start sequence, but optional start sequence is not set")
}
if config.OptStartTime != nil {
return nil, fmt.Errorf("consumer delivery policy is deliver by start sequence, but optional start time is also set")
}
case DeliverByStartTime:
if config.OptStartTime == nil {
return nil, fmt.Errorf("consumer delivery policy is deliver by start time, but optional start time is not set")
}
if config.OptStartSeq != 0 {
return nil, fmt.Errorf("consumer delivery policy is deliver by start time, but optional start sequence is also set")
}
}
sampleFreq := 0
if config.SampleFrequency != "" {
s := strings.TrimSuffix(config.SampleFrequency, "%")
sampleFreq, err = strconv.Atoi(s)
if err != nil {
return nil, fmt.Errorf("failed to parse consumer sampling configuration: %v", err)
}
}
// Hold mset lock here.
mset.mu.Lock()
// If this one is durable and already exists, we let that be ok as long as the configs match.
if isDurableConsumer(config) {
if eo, ok := mset.consumers[config.Durable]; ok {
mset.mu.Unlock()
ocfg := eo.Config()
if reflect.DeepEqual(&ocfg, config) {
return eo, nil
} else {
				// If we are in push mode and not active and the only difference
				// is the deliver subject, then update and return.
if configsEqualSansDelivery(ocfg, *config) && eo.hasNoLocalInterest() {
eo.updateDeliverSubject(config.DeliverSubject)
return eo, nil
} else {
return nil, fmt.Errorf("consumer already exists")
}
}
}
}
// Check for any limits.
if mset.config.MaxConsumers > 0 && len(mset.consumers) >= mset.config.MaxConsumers {
mset.mu.Unlock()
return nil, fmt.Errorf("maximum consumers limit reached")
}
// Check on stream type conflicts.
switch mset.config.Retention {
case WorkQueuePolicy:
// Force explicit acks here.
if config.AckPolicy != AckExplicit {
mset.mu.Unlock()
return nil, fmt.Errorf("workqueue stream requires explicit ack")
}
if len(mset.consumers) > 0 {
if config.FilterSubject == _EMPTY_ {
mset.mu.Unlock()
return nil, fmt.Errorf("multiple non-filtered observables not allowed on workqueue stream")
} else if !mset.partitionUnique(config.FilterSubject) {
// We have a partition but it is not unique amongst the others.
mset.mu.Unlock()
return nil, fmt.Errorf("filtered consumer not unique on workqueue stream")
}
}
if config.DeliverPolicy != DeliverAll {
mset.mu.Unlock()
return nil, fmt.Errorf("consumer must be deliver all on workqueue stream")
}
}
// Set name, which will be durable name if set, otherwise we create one at random.
o := &Consumer{mset: mset,
config: *config,
dsubj: config.DeliverSubject,
active: true,
qch: make(chan struct{}),
fch: make(chan struct{}),
sfreq: int32(sampleFreq),
maxdc: uint64(config.MaxDeliver),
created: time.Now().UTC(),
}
if isDurableConsumer(config) {
if len(config.Durable) > JSMaxNameLen {
mset.mu.Unlock()
return nil, fmt.Errorf("consumer name is too long, maximum allowed is %d", JSMaxNameLen)
}
o.name = config.Durable
} else {
for {
o.name = createConsumerName()
if _, ok := mset.consumers[o.name]; !ok {
break
}
}
}
// Check if we have filtered subject that is a wildcard.
if config.FilterSubject != _EMPTY_ && !subjectIsLiteral(config.FilterSubject) {
o.filterWC = true
}
// already under lock, mset.Name() would deadlock
o.stream = mset.config.Name
o.ackEventT = JSMetricConsumerAckPre + "." + o.stream + "." + o.name
o.deliveryExcEventT = JSAdvisoryConsumerMaxDeliveryExceedPre + "." + o.stream + "." + o.name
store, err := mset.store.ConsumerStore(o.name, config)
if err != nil {
mset.mu.Unlock()
return nil, fmt.Errorf("error creating store for observable: %v", err)
}
o.store = store
if !isValidName(o.name) {
mset.mu.Unlock()
return nil, fmt.Errorf("durable name can not contain '.', '*', '>'")
}
// Select starting sequence number
o.selectStartingSeqNo()
// Now register with mset and create the ack subscription.
c := mset.client
if c == nil {
mset.mu.Unlock()
return nil, fmt.Errorf("stream not valid")
}
s, a := c.srv, c.acc
o.acc = a
// Check if we already have this one registered.
if eo, ok := mset.consumers[o.name]; ok {
mset.mu.Unlock()
if !o.isDurable() || !o.isPushMode() {
return nil, fmt.Errorf("consumer already exists")
}
// If we are here we have already registered this durable. If it is still active that is an error.
if eo.Active() {
return nil, fmt.Errorf("consumer already exists and is still active")
}
// Since we are here this means we have a potentially new durable so we should update here.
// Check that configs are the same.
if !configsEqualSansDelivery(o.config, eo.config) {
return nil, fmt.Errorf("consumer replacement durable config not the same")
}
// Once we are here we have a replacement push-based durable.
eo.updateDeliverSubject(o.config.DeliverSubject)
return eo, nil
}
// Set up the ack subscription for this observable. Will use wildcard for all acks.
// We will remember the template to generate replies with sequence numbers and use
// that to scanf them back in.
mn := mset.config.Name
pre := fmt.Sprintf(jsAckT, mn, o.name)
o.ackReplyT = fmt.Sprintf("%s.%%d.%%d.%%d.%%d", pre)
ackSubj := fmt.Sprintf("%s.*.*.*.*", pre)
if sub, err := mset.subscribeInternal(ackSubj, o.processAck); err != nil {
mset.mu.Unlock()
return nil, err
} else {
o.ackSub = sub
}
// Setup the internal sub for next message requests.
if !o.isPushMode() {
o.nextMsgSubj = fmt.Sprintf(JSApiRequestNextT, mn, o.name)
if sub, err := mset.subscribeInternal(o.nextMsgSubj, o.processNextMsgReq); err != nil {
mset.mu.Unlock()
o.deleteWithoutAdvisory()
return nil, err
} else {
o.reqSub = sub
}
}
mset.consumers[o.name] = o
mset.mu.Unlock()
// If push mode, register for notifications on interest.
if o.isPushMode() {
o.dthresh = JsDeleteWaitTimeDefault
o.inch = make(chan bool, 4)
a.sl.RegisterNotification(config.DeliverSubject, o.inch)
o.active = o.hasDeliveryInterest(<-o.inch)
// Check if we are not durable that the delivery subject has interest.
if !o.isDurable() && !o.active {
o.deleteWithoutAdvisory()
return nil, fmt.Errorf("consumer requires interest for delivery subject when ephemeral")
}
}
// If we are not in ReplayInstant mode mark us as in replay state until resolved.
if config.ReplayPolicy != ReplayInstant {
o.replay = true
}
// Now start up Go routine to deliver msgs.
go o.loopAndDeliverMsgs(s, a)
// Startup our state update loop.
go o.updateStateLoop()
o.sendCreateAdvisory()
return o, nil
}
// We need to make sure we protect access to the sendq.
// Do all advisory sends here.
// Lock should be held on entry but will be released.
func (o *Consumer) sendAdvisory(subj string, msg []byte) {
if o.mset != nil && o.mset.sendq != nil {
sendq := o.mset.sendq
o.mu.Unlock()
sendq <- &jsPubMsg{subj, subj, _EMPTY_, nil, msg, nil, 0}
o.mu.Lock()
}
}
func (o *Consumer) sendDeleteAdvisoryLocked() {
e := JSConsumerActionAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
Action: DeleteEvent,
}
j, err := json.MarshalIndent(e, "", " ")
if err != nil {
return
}
subj := JSAdvisoryConsumerDeletedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
func (o *Consumer) sendCreateAdvisory() {
o.mu.Lock()
defer o.mu.Unlock()
e := JSConsumerActionAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerActionAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
Action: CreateEvent,
}
j, err := json.MarshalIndent(e, "", " ")
if err != nil {
return
}
subj := JSAdvisoryConsumerCreatedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
// Created returns created time.
func (o *Consumer) Created() time.Time {
o.mu.Lock()
created := o.created
o.mu.Unlock()
return created
}
// Internal to allow creation time to be restored.
func (o *Consumer) setCreated(created time.Time) {
o.mu.Lock()
o.created = created
o.mu.Unlock()
}
// This will check for extended interest in a subject. If we have local interest we just return
// that, but in the absence of local interest and presence of gateways or service imports we need
// to check those as well.
func (o *Consumer) hasDeliveryInterest(localInterest bool) bool {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return false
}
acc := o.acc
deliver := o.config.DeliverSubject
o.mu.Unlock()
if localInterest {
return true
}
// If we are here check gateways.
if acc.srv != nil && acc.srv.gateway.enabled {
gw := acc.srv.gateway
gw.RLock()
for _, gwc := range gw.outo {
psi, qr := gwc.gatewayInterest(acc.Name, deliver)
if psi || qr != nil {
gw.RUnlock()
return true
}
}
gw.RUnlock()
}
return false
}
// This processes an update to the local interest for a deliver subject.
func (o *Consumer) updateDeliveryInterest(localInterest bool) {
interest := o.hasDeliveryInterest(localInterest)
o.mu.Lock()
mset := o.mset
if mset == nil || o.isPullMode() {
o.mu.Unlock()
return
}
shouldSignal := interest && !o.active
o.active = interest
// Stop and clear the delete timer always.
stopAndClearTimer(&o.dtmr)
// If we do not have interest anymore and we are not durable start
// a timer to delete us. We wait for a bit in case of server reconnect.
if !o.isDurable() && !interest {
o.dtmr = time.AfterFunc(o.dthresh, func() { o.Delete() })
}
o.mu.Unlock()
if shouldSignal {
mset.signalConsumers()
}
}
// Config returns the consumer's configuration.
func (o *Consumer) Config() ConsumerConfig {
o.mu.Lock()
defer o.mu.Unlock()
return o.config
}
// This is a config change for the delivery subject for a
// push based consumer.
func (o *Consumer) updateDeliverSubject(newDeliver string) {
// Update the config and the dsubj
o.mu.Lock()
defer o.mu.Unlock()
mset := o.mset
if mset == nil || o.isPullMode() {
return
}
oldDeliver := o.config.DeliverSubject
o.dsubj = newDeliver
o.config.DeliverSubject = newDeliver
// FIXME(dlc) - check partitions, we may need offset.
o.dseq = o.adflr
o.sseq = o.asflr
// When we register new one it will deliver to update state loop.
o.acc.sl.ClearNotification(oldDeliver, o.inch)
o.acc.sl.RegisterNotification(newDeliver, o.inch)
}
// Check that configs are equal but allow delivery subjects to be different.
func configsEqualSansDelivery(a, b ConsumerConfig) bool {
// These were copied in so can set Delivery here.
a.DeliverSubject, b.DeliverSubject = _EMPTY_, _EMPTY_
return a == b
}
// Helper to send a reply to an ack.
func (o *Consumer) sendAckReply(subj string) {
o.mu.Lock()
defer o.mu.Unlock()
o.sendAdvisory(subj, nil)
}
// Process a message for the ack reply subject delivered with a message.
func (o *Consumer) processAck(_ *subscription, _ *client, subject, reply string, msg []byte) {
sseq, dseq, dcount, _ := o.ReplyInfo(subject)
switch {
case len(msg) == 0, bytes.Equal(msg, AckAck), bytes.Equal(msg, AckOK):
o.ackMsg(sseq, dseq, dcount)
case bytes.Equal(msg, AckNext):
o.ackMsg(sseq, dseq, dcount)
o.processNextMsgReq(nil, nil, subject, reply, nil)
case bytes.Equal(msg, AckNak):
o.processNak(sseq, dseq)
case bytes.Equal(msg, AckProgress):
o.progressUpdate(sseq)
case bytes.Equal(msg, AckTerm):
o.processTerm(sseq, dseq, dcount)
}
// Ack the ack if requested.
if len(reply) > 0 {
o.sendAckReply(reply)
}
}
// Used to process a working update to delay redelivery.
func (o *Consumer) progressUpdate(seq uint64) {
o.mu.Lock()
if len(o.pending) > 0 {
if _, ok := o.pending[seq]; ok {
o.pending[seq] = time.Now().UnixNano()
}
}
o.mu.Unlock()
}
// Process a NAK.
func (o *Consumer) processNak(sseq, dseq uint64) {
var mset *Stream
o.mu.Lock()
// Check for out of range.
if dseq <= o.adflr || dseq > o.dseq {
o.mu.Unlock()
return
}
// If we are explicit ack make sure this is still on pending list.
if len(o.pending) > 0 {
if _, ok := o.pending[sseq]; !ok {
o.mu.Unlock()
return
}
}
// If already queued up also ignore.
if !o.onRedeliverQueue(sseq) {
o.rdq = append(o.rdq, sseq)
mset = o.mset
}
o.mu.Unlock()
if mset != nil {
mset.signalConsumers()
}
}
// Process a TERM
func (o *Consumer) processTerm(sseq, dseq, dcount uint64) {
// Treat like an ack to suppress redelivery.
o.processAckMsg(sseq, dseq, dcount, false)
o.mu.Lock()
defer o.mu.Unlock()
// Deliver an advisory
e := JSConsumerDeliveryTerminatedAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerDeliveryTerminatedAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
ConsumerSeq: dseq,
StreamSeq: sseq,
Deliveries: dcount,
}
j, err := json.MarshalIndent(e, "", " ")
if err != nil {
return
}
subj := JSAdvisoryConsumerMsgTerminatedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
}
// Introduce a small delay in when timer fires to check pending.
// Allows bursts to be treated in same time frame.
const ackWaitDelay = time.Millisecond
// ackWait returns how long to wait to fire the pending timer.
func (o *Consumer) ackWait(next time.Duration) time.Duration {
if next > 0 {
return next + ackWaitDelay
}
return o.config.AckWait + ackWaitDelay
}
// This will restore the state from disk.
func (o *Consumer) readStoredState() error {
if o.store == nil {
return nil
}
state, err := o.store.State()
if err == nil && state != nil {
// FIXME(dlc) - re-apply state.
o.dseq = state.Delivered.ConsumerSeq
o.sseq = state.Delivered.StreamSeq
o.adflr = state.AckFloor.ConsumerSeq
o.asflr = state.AckFloor.StreamSeq
o.pending = state.Pending
o.rdc = state.Redelivered
}
// Setup tracking timer if we have restored pending.
if len(o.pending) > 0 && o.ptmr == nil {
o.mu.Lock()
o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)
o.mu.Unlock()
}
return err
}
// Update our state to the store.
func (o *Consumer) writeState() {
o.mu.Lock()
if o.store != nil {
state := &ConsumerState{
Delivered: SequencePair{
ConsumerSeq: o.dseq,
StreamSeq: o.sseq,
},
AckFloor: SequencePair{
ConsumerSeq: o.adflr,
StreamSeq: o.asflr,
},
Pending: o.pending,
Redelivered: o.rdc,
}
// FIXME(dlc) - Hold onto any errors.
o.store.Update(state)
}
o.mu.Unlock()
}
func (o *Consumer) updateStateLoop() {
o.mu.Lock()
fch := o.fch
qch := o.qch
inch := o.inch
o.mu.Unlock()
for {
select {
case <-qch:
return
case interest := <-inch:
// inch can be nil on pull-based, but then this will
// just block and not fire.
o.updateDeliveryInterest(interest)
case <-fch:
// FIXME(dlc) - Check for fast changes at quick intervals.
time.Sleep(25 * time.Millisecond)
o.writeState()
}
}
}
// Info returns our current consumer state.
func (o *Consumer) Info() *ConsumerInfo {
o.mu.Lock()
info := &ConsumerInfo{
Stream: o.stream,
Name: o.name,
Created: o.created,
Config: o.config,
Delivered: SequencePair{
ConsumerSeq: o.dseq - 1,
StreamSeq: o.sseq - 1,
},
AckFloor: SequencePair{
ConsumerSeq: o.adflr,
StreamSeq: o.asflr,
},
NumPending: len(o.pending),
NumRedelivered: len(o.rdc),
}
o.mu.Unlock()
return info
}
// Will update the underlying store.
// Lock should be held.
func (o *Consumer) updateStore() {
if o.store == nil {
return
}
// Kick our flusher
select {
case o.fch <- struct{}{}:
default:
}
}
// shouldSample lets us know if we are sampling metrics on acks.
func (o *Consumer) shouldSample() bool {
switch {
case o.sfreq <= 0:
return false
case o.sfreq >= 100:
return true
}
	// TODO(ripienaar) this is a tad slow so we need to rethink here, however this will only
	// hit for those with sampling enabled, and it's not the default
return mrand.Int31n(100) <= o.sfreq
}
func (o *Consumer) sampleAck(sseq, dseq, dcount uint64) {
if !o.shouldSample() {
return
}
now := time.Now().UTC()
unow := now.UnixNano()
e := JSConsumerAckMetric{
TypedEvent: TypedEvent{
Type: JSConsumerAckMetricType,
ID: nuid.Next(),
Time: now,
},
Stream: o.stream,
Consumer: o.name,
ConsumerSeq: dseq,
StreamSeq: sseq,
Delay: unow - o.pending[sseq],
Deliveries: dcount,
}
j, err := json.MarshalIndent(e, "", " ")
if err != nil {
return
}
o.sendAdvisory(o.ackEventT, j)
}
// Process an ack for a message.
func (o *Consumer) ackMsg(sseq, dseq, dcount uint64) {
o.processAckMsg(sseq, dseq, dcount, true)
}
func (o *Consumer) processAckMsg(sseq, dseq, dcount uint64, doSample bool) {
var sagap uint64
o.mu.Lock()
switch o.config.AckPolicy {
case AckExplicit:
if _, ok := o.pending[sseq]; ok {
if doSample {
o.sampleAck(sseq, dseq, dcount)
}
delete(o.pending, sseq)
}
		// Consumer sequence numbers can skip during redelivery since
		// they always increment. So if we do not have any pending, treat this
		// like the ack-all scenario below. Otherwise check that we filled in a gap.
// TODO(dlc) - check this.
if len(o.pending) == 0 || dseq == o.adflr+1 {
o.adflr, o.asflr = dseq, sseq
}
delete(o.rdc, sseq)
o.removeFromRedeliverQueue(sseq)
case AckAll:
// no-op
if dseq <= o.adflr || sseq <= o.asflr {
o.mu.Unlock()
return
}
sagap = sseq - o.asflr
o.adflr, o.asflr = dseq, sseq
for seq := sseq; seq > sseq-sagap; seq-- {
delete(o.pending, seq)
delete(o.rdc, seq)
o.removeFromRedeliverQueue(seq)
}
case AckNone:
// FIXME(dlc) - This is error but do we care?
o.mu.Unlock()
return
}
o.updateStore()
mset := o.mset
o.mu.Unlock()
// Let the owning stream know if we are interest or workqueue retention based.
if mset != nil && mset.config.Retention != LimitsPolicy {
if sagap > 1 {
// FIXME(dlc) - This is very inefficient, will need to fix.
for seq := sseq; seq > sseq-sagap; seq-- {
mset.ackMsg(o, seq)
}
} else {
mset.ackMsg(o, sseq)
}
}
}
// Check if we need an ack for this store seq.
func (o *Consumer) needAck(sseq uint64) bool {
var na bool
o.mu.Lock()
switch o.config.AckPolicy {
case AckNone, AckAll:
na = sseq > o.asflr
case AckExplicit:
if sseq > o.asflr && len(o.pending) > 0 {
_, na = o.pending[sseq]
}
}
o.mu.Unlock()
return na
}
// Default is 1 if msg is nil.
func batchSizeFromMsg(msg []byte) int {
bs := 1
if len(msg) > 0 {
if n, err := strconv.Atoi(string(msg)); err == nil {
bs = n
}
}
return bs
}
// processNextMsgReq will process a request for the next message available. A nil message payload means deliver
// a single message. If the payload is a number parseable with Atoi(), then we will send a batch of messages without
// requiring another request to this endpoint, or an ACK.
func (o *Consumer) processNextMsgReq(_ *subscription, _ *client, _, reply string, msg []byte) {
// Check payload here to see if they sent in batch size.
batchSize := batchSizeFromMsg(msg)
o.mu.Lock()
mset := o.mset
if mset == nil || o.isPushMode() {
o.mu.Unlock()
return
}
shouldSignal := false
for i := 0; i < batchSize; i++ {
// If we are in replay mode, defer to processReplay for delivery.
if o.replay {
o.waiting = append(o.waiting, reply)
shouldSignal = true
} else if subj, hdr, msg, seq, dc, ts, err := o.getNextMsg(); err == nil {
o.deliverMsg(reply, subj, hdr, msg, seq, dc, ts)
} else {
o.waiting = append(o.waiting, reply)
}
}
o.mu.Unlock()
if shouldSignal {
mset.signalConsumers()
}
}
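// Illustrative only (not part of the original file): the handler above implies a simple
// client-side pattern for pull consumers. Rough sketch, assuming a connected *nats.Conn
// nc from the nats.go client and a consumer o created via AddConsumer; names are
// assumptions for illustration:
//
//	subj := o.RequestNextMsgSubject()
//	msg, err := nc.Request(subj, nil, 2*time.Second) // nil payload => a single message
//	if err == nil {
//		_ = msg.Respond([]byte("+ACK")) // pull mode requires an explicit ack
//	}
//
// Sending a numeric payload such as []byte("10") instead would be parsed by
// batchSizeFromMsg and deliver a batch of up to 10 messages to the reply subject.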
// Increase the delivery count for this message.
// ONLY used on redelivery semantics.
// Lock should be held.
func (o *Consumer) incDeliveryCount(sseq uint64) uint64 {
if o.rdc == nil {
o.rdc = make(map[uint64]uint64)
}
o.rdc[sseq] += 1
return o.rdc[sseq] + 1
}
// send a delivery exceeded advisory.
func (o *Consumer) notifyDeliveryExceeded(sseq, dcount uint64) {
e := JSConsumerDeliveryExceededAdvisory{
TypedEvent: TypedEvent{
Type: JSConsumerDeliveryExceededAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Stream: o.stream,
Consumer: o.name,
StreamSeq: sseq,
Deliveries: dcount,
}
j, err := json.MarshalIndent(e, "", " ")
if err != nil {
return
}
o.sendAdvisory(o.deliveryExcEventT, j)
}
// Check to see if the candidate subject matches a filter if its present.
func (o *Consumer) isFilteredMatch(subj string) bool {
if !o.filterWC {
return subj == o.config.FilterSubject
}
// If we are here we have a wildcard filter subject.
// TODO(dlc) at speed might be better to just do a sublist with L2 and/or possibly L1.
return subjectIsSubsetMatch(subj, o.config.FilterSubject)
}
// Get next available message from underlying store.
// Is partition aware and redeliver aware.
// Lock should be held.
func (o *Consumer) getNextMsg() (subj string, hdr, msg []byte, seq uint64, dcount uint64, ts int64, err error) {
if o.mset == nil {
return _EMPTY_, nil, nil, 0, 0, 0, fmt.Errorf("consumer not valid")
}
for {
seq, dcount := o.sseq, uint64(1)
if len(o.rdq) > 0 {
seq = o.rdq[0]
o.rdq = append(o.rdq[:0], o.rdq[1:]...)
dcount = o.incDeliveryCount(seq)
if o.maxdc > 0 && dcount > o.maxdc {
// Only send once
if dcount == o.maxdc+1 {
o.notifyDeliveryExceeded(seq, dcount-1)
}
// Make sure to remove from pending.
delete(o.pending, seq)
continue
}
}
subj, hdr, msg, ts, err := o.mset.store.LoadMsg(seq)
if err == nil {
if dcount == 1 { // First delivery.
o.sseq++
if o.config.FilterSubject != _EMPTY_ && !o.isFilteredMatch(subj) {
continue
}
}
// We have the msg here.
return subj, hdr, msg, seq, dcount, ts, nil
}
// We got an error here. If this is an EOF we will return, otherwise
// we can continue looking.
if err == ErrStoreEOF || err == ErrStoreClosed {
return _EMPTY_, nil, nil, 0, 0, 0, err
}
// Skip since its probably deleted or expired.
o.sseq++
}
}
// Will check to make sure those waiting still have registered interest.
func (o *Consumer) checkWaitingForInterest() bool {
for len(o.waiting) > 0 {
rr := o.acc.sl.Match(o.waiting[0])
if len(rr.psubs)+len(rr.qsubs) > 0 {
break
}
// No more interest so go ahead and remove this one from our list.
o.waiting = append(o.waiting[:0], o.waiting[1:]...)
}
return len(o.waiting) > 0
}
func (o *Consumer) loopAndDeliverMsgs(s *Server, a *Account) {
	// On startup check to see if we are in a replay situation where the replay policy is not instant.
var (
lts int64 // last time stamp seen, used for replay.
lseq uint64
)
o.mu.Lock()
if o.replay {
// consumer is closed when mset is set to nil.
if o.mset == nil {
o.mu.Unlock()
return
}
lseq = o.mset.State().LastSeq
}
o.mu.Unlock()
// Deliver all the msgs we have now, once done or on a condition, we wait for new ones.
for {
var (
mset *Stream
seq, dcnt uint64
subj, dsubj string
hdr []byte
msg []byte
err error
ts int64
delay time.Duration
)
o.mu.Lock()
// consumer is closed when mset is set to nil.
if o.mset == nil {
o.mu.Unlock()
return
}
mset = o.mset
// If we are in push mode and not active let's stop sending.
if o.isPushMode() && !o.active {
goto waitForMsgs
}
// If we are in pull mode and no one is waiting already break and wait.
if o.isPullMode() && !o.checkWaitingForInterest() {
goto waitForMsgs
}
subj, hdr, msg, seq, dcnt, ts, err = o.getNextMsg()
// On error either wait or return.
if err != nil {
if err == ErrStoreMsgNotFound || err == ErrStoreEOF {
goto waitForMsgs
} else {
o.mu.Unlock()
return
}
}
if len(o.waiting) > 0 {
dsubj = o.waiting[0]
o.waiting = append(o.waiting[:0], o.waiting[1:]...)
} else {
dsubj = o.dsubj
}
// If we are in a replay scenario and have not caught up check if we need to delay here.
if o.replay && lts > 0 {
if delay = time.Duration(ts - lts); delay > time.Millisecond {
qch := o.qch
o.mu.Unlock()
select {
case <-qch:
return
case <-time.After(delay):
}
o.mu.Lock()
}
}
// Track this regardless.
lts = ts
o.deliverMsg(dsubj, subj, hdr, msg, seq, dcnt, ts)
o.mu.Unlock()
continue
waitForMsgs:
// If we were in a replay state check to see if we are caught up. If so clear.
if o.replay && o.sseq > lseq {
o.replay = false
}
// We will wait here for new messages to arrive.
o.mu.Unlock()
mset.waitForMsgs()
}
}
func (o *Consumer) ackReply(sseq, dseq, dcount uint64, ts int64) string {
return fmt.Sprintf(o.ackReplyT, dcount, sseq, dseq, ts)
}
// deliverCurrentMsg is the hot path to deliver a message that was just received.
// Will return if the message was delivered or not.
func (o *Consumer) deliverCurrentMsg(subj string, hdr, msg []byte, seq uint64, ts int64) bool {
o.mu.Lock()
if seq != o.sseq {
o.mu.Unlock()
return false
}
// If we are in push mode and not active let's stop sending.
if o.isPushMode() && !o.active {
o.mu.Unlock()
return false
}
// If we are in pull mode and no one is waiting already break and wait.
if o.isPullMode() && !o.checkWaitingForInterest() {
o.mu.Unlock()
return false
}
// Bump store sequence here.
o.sseq++
// If we are partitioned and we do not match, do not consider this a failure.
// Go ahead and return true.
if o.config.FilterSubject != _EMPTY_ && !o.isFilteredMatch(subj) {
o.mu.Unlock()
return true
}
var dsubj string
if len(o.waiting) > 0 {
dsubj = o.waiting[0]
o.waiting = append(o.waiting[:0], o.waiting[1:]...)
} else {
dsubj = o.dsubj
}
if len(msg) > 0 {
msg = append(msg[:0:0], msg...)
}
o.deliverMsg(dsubj, subj, hdr, msg, seq, 1, ts)
o.mu.Unlock()
return true
}
// Deliver a msg to the observable.
// Lock should be held and o.mset validated to be non-nil.
func (o *Consumer) deliverMsg(dsubj, subj string, hdr, msg []byte, seq, dcount uint64, ts int64) {
if o.mset == nil {
return
}
pmsg := &jsPubMsg{dsubj, subj, o.ackReply(seq, o.dseq, dcount, ts), hdr, msg, o, seq}
sendq := o.mset.sendq
// This needs to be unlocked since the other side may need this lock on failed delivery.
o.mu.Unlock()
sendq <- pmsg
o.mu.Lock()
ap := o.config.AckPolicy
if ap == AckNone {
o.adflr = o.dseq
o.asflr = seq
} else if ap == AckExplicit || ap == AckAll {
o.trackPending(seq)
}
o.dseq++
o.updateStore()
}
// Tracks our outstanding pending acks. Only applicable to AckExplicit mode.
// Lock should be held.
func (o *Consumer) trackPending(seq uint64) {
if o.pending == nil {
o.pending = make(map[uint64]int64)
}
if o.ptmr == nil {
o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)
}
o.pending[seq] = time.Now().UnixNano()
}
// didNotDeliver is called when a delivery for a consumer message failed.
// Depending on our state, we will process the failure.
func (o *Consumer) didNotDeliver(seq uint64) {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return
}
shouldSignal := false
if o.isPushMode() {
o.active = false
} else if o.pending != nil {
		// pull mode and we have pending.
if _, ok := o.pending[seq]; ok {
			// We found this message on pending, we need
// to queue it up for immediate redelivery since
// we know it was not delivered.
if !o.onRedeliverQueue(seq) {
o.rdq = append(o.rdq, seq)
shouldSignal = true
}
}
}
o.mu.Unlock()
if shouldSignal {
mset.signalConsumers()
}
}
// This checks if we already have this sequence queued for redelivery.
// FIXME(dlc) - This is O(n) but should be fast with small redeliver size.
// Lock should be held.
func (o *Consumer) onRedeliverQueue(seq uint64) bool {
for _, rseq := range o.rdq {
if rseq == seq {
return true
}
}
return false
}
// Remove a sequence from the redelivery queue.
// Lock should be held.
func (o *Consumer) removeFromRedeliverQueue(seq uint64) bool {
for i, rseq := range o.rdq {
if rseq == seq {
o.rdq = append(o.rdq[:i], o.rdq[i+1:]...)
return true
}
}
return false
}
// Checks the pending messages.
func (o *Consumer) checkPending() {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return
}
ttl := int64(o.config.AckWait)
next := int64(o.ackWait(0))
now := time.Now().UnixNano()
shouldSignal := false
// Since we can update timestamps, we have to review all pending.
// We may want to unlock here or warn if list is big.
// We also need to sort after.
var expired []uint64
for seq, ts := range o.pending {
elapsed := now - ts
if elapsed >= ttl {
if !o.onRedeliverQueue(seq) {
expired = append(expired, seq)
shouldSignal = true
}
} else if ttl-elapsed < next {
// Update when we should fire next.
next = ttl - elapsed
}
}
if len(expired) > 0 {
sort.Slice(expired, func(i, j int) bool { return expired[i] < expired[j] })
o.rdq = append(o.rdq, expired...)
// Now we should update the timestamp here since we are redelivering.
// We will use an incrementing time to preserve order for any other redelivery.
off := now - o.pending[expired[0]]
for _, seq := range expired {
o.pending[seq] += off
}
}
if len(o.pending) > 0 {
o.ptmr.Reset(o.ackWait(time.Duration(next)))
} else {
o.ptmr.Stop()
o.ptmr = nil
}
o.mu.Unlock()
if shouldSignal {
mset.signalConsumers()
}
}
// SeqFromReply will extract a sequence number from a reply subject.
func (o *Consumer) SeqFromReply(reply string) uint64 {
_, seq, _, _ := o.ReplyInfo(reply)
return seq
}
// StreamSeqFromReply will extract the stream sequence from the reply subject.
func (o *Consumer) StreamSeqFromReply(reply string) uint64 {
seq, _, _, _ := o.ReplyInfo(reply)
return seq
}
// Grab encoded information in the reply subject for a delivered message.
func (o *Consumer) ReplyInfo(reply string) (sseq, dseq, dcount uint64, ts int64) {
n, err := fmt.Sscanf(reply, o.ackReplyT, &dcount, &sseq, &dseq, &ts)
if err != nil || n != 4 {
return 0, 0, 0, 0
}
return
}
// NextSeq returns the next delivered sequence number for this observable.
func (o *Consumer) NextSeq() uint64 {
o.mu.Lock()
dseq := o.dseq
o.mu.Unlock()
return dseq
}
// This will select the store seq to start with based on the
// partition subject.
func (o *Consumer) selectSubjectLast() {
stats := o.mset.store.State()
if stats.LastSeq == 0 {
o.sseq = stats.LastSeq
return
}
// FIXME(dlc) - this is linear and can be optimized by store layer.
for seq := stats.LastSeq; seq >= stats.FirstSeq; seq-- {
subj, _, _, _, err := o.mset.store.LoadMsg(seq)
if err == ErrStoreMsgNotFound {
continue
}
if o.isFilteredMatch(subj) {
o.sseq = seq
return
}
}
}
// Will select the starting sequence.
func (o *Consumer) selectStartingSeqNo() {
stats := o.mset.store.State()
if o.config.OptStartSeq == 0 {
if o.config.DeliverPolicy == DeliverAll {
o.sseq = stats.FirstSeq
} else if o.config.DeliverPolicy == DeliverLast {
o.sseq = stats.LastSeq
// If we are partitioned here we may need to walk backwards.
if o.config.FilterSubject != _EMPTY_ {
o.selectSubjectLast()
}
} else if o.config.OptStartTime != nil {
// If we are here we are time based.
// TODO(dlc) - Once clustered can't rely on this.
o.sseq = o.mset.store.GetSeqFromTime(*o.config.OptStartTime)
} else {
// Default is deliver new only.
o.sseq = stats.LastSeq + 1
}
} else {
o.sseq = o.config.OptStartSeq
}
if stats.FirstSeq == 0 {
o.sseq = 1
} else if o.sseq < stats.FirstSeq {
o.sseq = stats.FirstSeq
} else if o.sseq > stats.LastSeq {
o.sseq = stats.LastSeq + 1
}
// Always set delivery sequence to 1.
o.dseq = 1
// Set ack delivery floor to delivery-1
o.adflr = o.dseq - 1
// Set ack store floor to store-1
o.asflr = o.sseq - 1
}
// Test whether a config represents a durable subscriber.
func isDurableConsumer(config *ConsumerConfig) bool {
return config != nil && config.Durable != _EMPTY_
}
func (o *Consumer) isDurable() bool {
return o.config.Durable != _EMPTY_
}
// Are we in push mode, delivery subject, etc.
func (o *Consumer) isPushMode() bool {
return o.config.DeliverSubject != _EMPTY_
}
func (o *Consumer) isPullMode() bool {
return o.config.DeliverSubject == _EMPTY_
}
// Name returns the name of this observable.
func (o *Consumer) Name() string {
o.mu.Lock()
n := o.name
o.mu.Unlock()
return n
}
// For now size of 6 for randomly created names.
const randConsumerNameLen = 6
func createConsumerName() string {
var b [256]byte
rand.Read(b[:])
sha := sha256.New()
sha.Write(b[:])
return fmt.Sprintf("%x", sha.Sum(nil))[:randConsumerNameLen]
}
// DeleteConsumer will delete the consumer from this stream.
func (mset *Stream) DeleteConsumer(o *Consumer) error {
return o.Delete()
}
// Active indicates if this consumer is still active.
func (o *Consumer) Active() bool {
o.mu.Lock()
active := o.active && o.mset != nil
o.mu.Unlock()
return active
}
// hasNoLocalInterest return true if we have no local interest.
func (o *Consumer) hasNoLocalInterest() bool {
o.mu.Lock()
rr := o.acc.sl.Match(o.config.DeliverSubject)
o.mu.Unlock()
return len(rr.psubs)+len(rr.qsubs) == 0
}
// This is when the underlying stream has been purged.
func (o *Consumer) purge(sseq uint64) {
o.mu.Lock()
o.sseq = sseq
o.asflr = sseq - 1
o.adflr = o.dseq - 1
if len(o.pending) > 0 {
o.pending = nil
if o.ptmr != nil {
o.ptmr.Stop()
// Do not nil this out here. This allows checkPending to fire
// and still be ok and not panic.
}
}
// We need to remove all those being queued for redelivery under o.rdq
if len(o.rdq) > 0 {
var newRDQ []uint64
for _, sseq := range o.rdq {
if sseq >= o.sseq {
newRDQ = append(newRDQ, sseq)
}
}
// Replace with new list. Most of the time this will be nil.
o.rdq = newRDQ
}
o.mu.Unlock()
}
func stopAndClearTimer(tp **time.Timer) {
if *tp == nil {
return
}
// Will get drained in normal course, do not try to
// drain here.
(*tp).Stop()
*tp = nil
}
// Stop will shutdown the consumer for the associated stream.
func (o *Consumer) Stop() error {
return o.stop(false, true, false)
}
func (o *Consumer) deleteWithoutAdvisory() error {
return o.stop(true, true, false)
}
// Delete will delete the consumer for the associated stream and send advisories.
func (o *Consumer) Delete() error {
return o.stop(true, true, true)
}
func (o *Consumer) stop(dflag, doSignal, advisory bool) error {
o.mu.Lock()
mset := o.mset
if mset == nil {
o.mu.Unlock()
return nil
}
if dflag && advisory {
o.sendDeleteAdvisoryLocked()
}
a := o.acc
close(o.qch)
store := o.store
o.mset = nil
o.active = false
ackSub := o.ackSub
reqSub := o.reqSub
o.ackSub = nil
o.reqSub = nil
stopAndClearTimer(&o.ptmr)
stopAndClearTimer(&o.dtmr)
delivery := o.config.DeliverSubject
o.mu.Unlock()
if delivery != "" {
a.sl.ClearNotification(delivery, o.inch)
}
mset.mu.Lock()
// Break us out of the readLoop.
// TODO(dlc) - Should not be bad for small amounts of observables, maybe
// even into thousands. Above that should check what this might do
// performance wise.
if doSignal {
mset.sg.Broadcast()
}
mset.unsubscribe(ackSub)
mset.unsubscribe(reqSub)
delete(mset.consumers, o.name)
mset.mu.Unlock()
// Make sure we stamp our update state
if !dflag {
o.writeState()
}
var err error
if store != nil {
if dflag {
err = store.Delete()
} else {
err = store.Stop()
}
}
return err
}
// Check that we do not form a cycle by delivering to a delivery subject
// that is part of the interest group.
func (mset *Stream) deliveryFormsCycle(deliverySubject string) bool {
mset.mu.Lock()
defer mset.mu.Unlock()
for _, subject := range mset.config.Subjects {
if subjectIsSubsetMatch(deliverySubject, subject) {
return true
}
}
return false
}
// This is same as check for delivery cycle.
func (mset *Stream) validSubject(partitionSubject string) bool {
return mset.deliveryFormsCycle(partitionSubject)
}
// SetInActiveDeleteThreshold sets the delete threshold for how long to wait
// before deleting an inactive ephemeral observable.
func (o *Consumer) SetInActiveDeleteThreshold(dthresh time.Duration) error {
o.mu.Lock()
defer o.mu.Unlock()
if o.isPullMode() {
return fmt.Errorf("consumer is not push-based")
}
if o.isDurable() {
return fmt.Errorf("consumer is not durable")
}
deleteWasRunning := o.dtmr != nil
stopAndClearTimer(&o.dtmr)
o.dthresh = dthresh
if deleteWasRunning {
o.dtmr = time.AfterFunc(o.dthresh, func() { o.Delete() })
}
return nil
}
// switchToEphemeral is called on startup when recovering ephemerals.
func (o *Consumer) switchToEphemeral() {
o.mu.Lock()
o.config.Durable = _EMPTY_
store, ok := o.store.(*consumerFileStore)
rr := o.acc.sl.Match(o.config.DeliverSubject)
o.mu.Unlock()
// Update interest
o.updateDeliveryInterest(len(rr.psubs)+len(rr.qsubs) > 0)
// Write out new config
if ok {
store.updateConfig(o.config)
}
}
// RequestNextMsgSubject returns the subject to request the next message when in pull or worker mode.
// Returns empty otherwise.
func (o *Consumer) RequestNextMsgSubject() string {
return o.nextMsgSubj
}
| 1 | 11,114 | would these require any account lock here? | nats-io-nats-server | go |
@@ -142,7 +142,7 @@ class RemoteConnection(object):
:Returns:
Timeout value in seconds for all http requests made to the Remote Connection
"""
- return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT or cls._timeout
+ return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT else cls._timeout
@classmethod
def set_timeout(cls, timeout): | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import socket
import string
import base64
try:
import http.client as httplib
from urllib import request as url_request
from urllib import parse
except ImportError: # above is available in py3+, below is py2.7
import httplib as httplib
import urllib2 as url_request
import urlparse as parse
from .command import Command
from .errorhandler import ErrorCode
from . import utils
LOGGER = logging.getLogger(__name__)
class Request(url_request.Request):
"""
Extends the url_request.Request to support all HTTP request types.
"""
def __init__(self, url, data=None, method=None):
"""
Initialise a new HTTP request.
:Args:
- url - String for the URL to send the request to.
- data - Data to send with the request.
"""
if method is None:
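            # Old-style conditional: resolves to 'POST' when data was provided, 'GET' otherwise.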
method = data is not None and 'POST' or 'GET'
elif method != 'POST' and method != 'PUT':
data = None
self._method = method
url_request.Request.__init__(self, url, data=data)
def get_method(self):
"""
Returns the HTTP method used by this request.
"""
return self._method
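# Illustrative only (not part of the original module): issuing a DELETE request with the
# Request class above; the URL is a made-up example.
#
#   req = Request('http://localhost:4444/wd/hub/session/abc123', method='DELETE')
#   assert req.get_method() == 'DELETE'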
class Response(object):
"""
Represents an HTTP response.
"""
def __init__(self, fp, code, headers, url):
"""
Initialise a new Response.
:Args:
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- headers - A dictionary of headers returned by the server.
- url - URL of the retrieved resource represented by this Response.
"""
self.fp = fp
self.read = fp.read
self.code = code
self.headers = headers
self.url = url
def close(self):
"""
Close the response body file object.
"""
self.read = None
self.fp = None
def info(self):
"""
Returns the response headers.
"""
return self.headers
def geturl(self):
"""
Returns the URL for the resource returned in this response.
"""
return self.url
class HttpErrorHandler(url_request.HTTPDefaultErrorHandler):
"""
A custom HTTP error handler.
Used to return Response objects instead of raising an HTTPError exception.
"""
def http_error_default(self, req, fp, code, msg, headers):
"""
Default HTTP error handler.
:Args:
- req - The original Request object.
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- msg - The HTTP status message returned by the server.
- headers - The response headers.
:Returns:
A new Response object.
"""
return Response(fp, code, headers, req.get_full_url())
class RemoteConnection(object):
"""A connection with the Remote WebDriver server.
Communicates with the server using the WebDriver wire protocol:
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol"""
_timeout = socket._GLOBAL_DEFAULT_TIMEOUT
@classmethod
def get_timeout(cls):
"""
:Returns:
Timeout value in seconds for all http requests made to the Remote Connection
"""
return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT or cls._timeout
@classmethod
def set_timeout(cls, timeout):
"""
Override the default timeout
:Args:
- timeout - timeout value for http requests in seconds
"""
cls._timeout = timeout
@classmethod
def reset_timeout(cls):
"""
Reset the http request timeout to socket._GLOBAL_DEFAULT_TIMEOUT
"""
cls._timeout = socket._GLOBAL_DEFAULT_TIMEOUT
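    # Illustrative only (not part of the original module): overriding the HTTP timeout for
    # all remote commands; 120 seconds is an arbitrary example value.
    #
    #   RemoteConnection.set_timeout(120)
    #   # ... create and use a remote driver ...
    #   RemoteConnection.reset_timeout()  # back to socket._GLOBAL_DEFAULT_TIMEOUT
    #
    # With the `else` fix from the patch above applied, get_timeout() would report 120
    # while the override is in effect.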
def __init__(self, remote_server_addr, keep_alive=False):
# Attempt to resolve the hostname and get an IP address.
self.keep_alive = keep_alive
parsed_url = parse.urlparse(remote_server_addr)
addr = ""
if parsed_url.hostname:
try:
netloc = socket.gethostbyname(parsed_url.hostname)
addr = netloc
if parsed_url.port:
netloc += ':%d' % parsed_url.port
if parsed_url.username:
auth = parsed_url.username
if parsed_url.password:
auth += ':%s' % parsed_url.password
netloc = '%s@%s' % (auth, netloc)
remote_server_addr = parse.urlunparse(
(parsed_url.scheme, netloc, parsed_url.path,
parsed_url.params, parsed_url.query, parsed_url.fragment))
except socket.gaierror:
LOGGER.info('Could not get IP address for host: %s' % parsed_url.hostname)
self._url = remote_server_addr
if keep_alive:
self._conn = httplib.HTTPConnection(
str(addr), str(parsed_url.port), timeout=self._timeout)
self._commands = {
Command.STATUS: ('GET', '/status'),
Command.NEW_SESSION: ('POST', '/session'),
Command.GET_ALL_SESSIONS: ('GET', '/sessions'),
Command.QUIT: ('DELETE', '/session/$sessionId'),
Command.GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window_handle'),
Command.GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window_handles'),
Command.GET: ('POST', '/session/$sessionId/url'),
Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'),
Command.GO_BACK: ('POST', '/session/$sessionId/back'),
Command.REFRESH: ('POST', '/session/$sessionId/refresh'),
Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'),
Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'),
Command.GET_TITLE: ('GET', '/session/$sessionId/title'),
Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'),
Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'),
Command.ELEMENT_SCREENSHOT: ('GET', '/session/$sessionId/screenshot/$id'),
Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'),
Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'),
Command.GET_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/element/active'),
Command.FIND_CHILD_ELEMENT:
('POST', '/session/$sessionId/element/$id/element'),
Command.FIND_CHILD_ELEMENTS:
('POST', '/session/$sessionId/element/$id/elements'),
Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'),
Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'),
Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'),
Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'),
Command.SEND_KEYS_TO_ELEMENT:
('POST', '/session/$sessionId/element/$id/value'),
Command.SEND_KEYS_TO_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/keys'),
Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"),
Command.GET_ELEMENT_VALUE:
('GET', '/session/$sessionId/element/$id/value'),
Command.GET_ELEMENT_TAG_NAME:
('GET', '/session/$sessionId/element/$id/name'),
Command.IS_ELEMENT_SELECTED:
('GET', '/session/$sessionId/element/$id/selected'),
Command.SET_ELEMENT_SELECTED:
('POST', '/session/$sessionId/element/$id/selected'),
Command.IS_ELEMENT_ENABLED:
('GET', '/session/$sessionId/element/$id/enabled'),
Command.IS_ELEMENT_DISPLAYED:
('GET', '/session/$sessionId/element/$id/displayed'),
Command.GET_ELEMENT_LOCATION:
('GET', '/session/$sessionId/element/$id/location'),
Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW:
('GET', '/session/$sessionId/element/$id/location_in_view'),
Command.GET_ELEMENT_SIZE:
('GET', '/session/$sessionId/element/$id/size'),
Command.GET_ELEMENT_RECT:
('GET', '/session/$sessionId/element/$id/rect'),
Command.GET_ELEMENT_ATTRIBUTE:
('GET', '/session/$sessionId/element/$id/attribute/$name'),
Command.ELEMENT_EQUALS:
('GET', '/session/$sessionId/element/$id/equals/$other'),
Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'),
Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'),
Command.DELETE_ALL_COOKIES:
('DELETE', '/session/$sessionId/cookie'),
Command.DELETE_COOKIE:
('DELETE', '/session/$sessionId/cookie/$name'),
Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'),
Command.SWITCH_TO_PARENT_FRAME: ('POST', '/session/$sessionId/frame/parent'),
Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'),
Command.CLOSE: ('DELETE', '/session/$sessionId/window'),
Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY:
('GET', '/session/$sessionId/element/$id/css/$propertyName'),
Command.IMPLICIT_WAIT:
('POST', '/session/$sessionId/timeouts/implicit_wait'),
Command.EXECUTE_ASYNC_SCRIPT: ('POST', '/session/$sessionId/execute_async'),
Command.SET_SCRIPT_TIMEOUT:
('POST', '/session/$sessionId/timeouts/async_script'),
Command.SET_TIMEOUTS:
('POST', '/session/$sessionId/timeouts'),
Command.DISMISS_ALERT:
('POST', '/session/$sessionId/dismiss_alert'),
Command.ACCEPT_ALERT:
('POST', '/session/$sessionId/accept_alert'),
Command.SET_ALERT_VALUE:
('POST', '/session/$sessionId/alert_text'),
Command.GET_ALERT_TEXT:
('GET', '/session/$sessionId/alert_text'),
Command.CLICK:
('POST', '/session/$sessionId/click'),
Command.DOUBLE_CLICK:
('POST', '/session/$sessionId/doubleclick'),
Command.MOUSE_DOWN:
('POST', '/session/$sessionId/buttondown'),
Command.MOUSE_UP:
('POST', '/session/$sessionId/buttonup'),
Command.MOVE_TO:
('POST', '/session/$sessionId/moveto'),
Command.GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/$windowHandle/size'),
Command.SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/$windowHandle/size'),
Command.GET_WINDOW_POSITION:
('GET', '/session/$sessionId/window/$windowHandle/position'),
Command.SET_WINDOW_POSITION:
('POST', '/session/$sessionId/window/$windowHandle/position'),
Command.MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/$windowHandle/maximize'),
Command.SET_SCREEN_ORIENTATION:
('POST', '/session/$sessionId/orientation'),
Command.GET_SCREEN_ORIENTATION:
('GET', '/session/$sessionId/orientation'),
Command.SINGLE_TAP:
('POST', '/session/$sessionId/touch/click'),
Command.TOUCH_DOWN:
('POST', '/session/$sessionId/touch/down'),
Command.TOUCH_UP:
('POST', '/session/$sessionId/touch/up'),
Command.TOUCH_MOVE:
('POST', '/session/$sessionId/touch/move'),
Command.TOUCH_SCROLL:
('POST', '/session/$sessionId/touch/scroll'),
Command.DOUBLE_TAP:
('POST', '/session/$sessionId/touch/doubleclick'),
Command.LONG_PRESS:
('POST', '/session/$sessionId/touch/longclick'),
Command.FLICK:
('POST', '/session/$sessionId/touch/flick'),
Command.EXECUTE_SQL:
('POST', '/session/$sessionId/execute_sql'),
Command.GET_LOCATION:
('GET', '/session/$sessionId/location'),
Command.SET_LOCATION:
('POST', '/session/$sessionId/location'),
Command.GET_APP_CACHE:
('GET', '/session/$sessionId/application_cache'),
Command.GET_APP_CACHE_STATUS:
('GET', '/session/$sessionId/application_cache/status'),
Command.CLEAR_APP_CACHE:
('DELETE', '/session/$sessionId/application_cache/clear'),
Command.GET_NETWORK_CONNECTION:
('GET', '/session/$sessionId/network_connection'),
Command.SET_NETWORK_CONNECTION:
('POST', '/session/$sessionId/network_connection'),
Command.GET_LOCAL_STORAGE_ITEM:
('GET', '/session/$sessionId/local_storage/key/$key'),
Command.REMOVE_LOCAL_STORAGE_ITEM:
('DELETE', '/session/$sessionId/local_storage/key/$key'),
Command.GET_LOCAL_STORAGE_KEYS:
('GET', '/session/$sessionId/local_storage'),
Command.SET_LOCAL_STORAGE_ITEM:
('POST', '/session/$sessionId/local_storage'),
Command.CLEAR_LOCAL_STORAGE:
('DELETE', '/session/$sessionId/local_storage'),
Command.GET_LOCAL_STORAGE_SIZE:
('GET', '/session/$sessionId/local_storage/size'),
Command.GET_SESSION_STORAGE_ITEM:
('GET', '/session/$sessionId/session_storage/key/$key'),
Command.REMOVE_SESSION_STORAGE_ITEM:
('DELETE', '/session/$sessionId/session_storage/key/$key'),
Command.GET_SESSION_STORAGE_KEYS:
('GET', '/session/$sessionId/session_storage'),
Command.SET_SESSION_STORAGE_ITEM:
('POST', '/session/$sessionId/session_storage'),
Command.CLEAR_SESSION_STORAGE:
('DELETE', '/session/$sessionId/session_storage'),
Command.GET_SESSION_STORAGE_SIZE:
('GET', '/session/$sessionId/session_storage/size'),
Command.GET_LOG:
('POST', '/session/$sessionId/log'),
Command.GET_AVAILABLE_LOG_TYPES:
('GET', '/session/$sessionId/log/types'),
Command.CURRENT_CONTEXT_HANDLE:
('GET', '/session/$sessionId/context'),
Command.CONTEXT_HANDLES:
('GET', '/session/$sessionId/contexts'),
Command.SWITCH_TO_CONTEXT:
('POST', '/session/$sessionId/context'),
}
def execute(self, command, params):
"""
Send a command to the remote server.
Any path substitutions required for the URL mapped to the command should be
included in the command parameters.
:Args:
- command - A string specifying the command to execute.
- params - A dictionary of named parameters to send with the command as
its JSON payload.
"""
command_info = self._commands[command]
assert command_info is not None, 'Unrecognised command %s' % command
data = utils.dump_json(params)
path = string.Template(command_info[1]).substitute(params)
url = '%s%s' % (self._url, path)
return self._request(command_info[0], url, body=data)
def _request(self, method, url, body=None):
"""
Send an HTTP request to the remote server.
:Args:
- method - A string for the HTTP method to send the request with.
- url - A string for the URL to send the request to.
- body - A string for request body. Ignored unless method is POST or PUT.
:Returns:
A dictionary with the server's parsed JSON response.
"""
LOGGER.debug('%s %s %s' % (method, url, body))
parsed_url = parse.urlparse(url)
if self.keep_alive:
headers = {"Connection": 'keep-alive', method: parsed_url.path,
"User-Agent": "Python http auth",
"Content-type": "application/json;charset=\"UTF-8\"",
"Accept": "application/json"}
if parsed_url.username:
auth = base64.standard_b64encode('%s:%s' %
(parsed_url.username, parsed_url.password)).replace('\n', '')
headers["Authorization"] = "Basic %s" % auth
if body and method != 'POST' and method != 'PUT':
body = None
try:
self._conn.request(method, parsed_url.path, body, headers)
resp = self._conn.getresponse()
except (httplib.HTTPException, socket.error):
self._conn.close()
raise
statuscode = resp.status
else:
password_manager = None
if parsed_url.username:
netloc = parsed_url.hostname
if parsed_url.port:
netloc += ":%s" % parsed_url.port
cleaned_url = parse.urlunparse((parsed_url.scheme,
netloc,
parsed_url.path,
parsed_url.params,
parsed_url.query,
parsed_url.fragment))
password_manager = url_request.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None,
"%s://%s" % (parsed_url.scheme, netloc),
parsed_url.username,
parsed_url.password)
request = Request(cleaned_url, data=body.encode('utf-8'), method=method)
else:
request = Request(url, data=body.encode('utf-8'), method=method)
request.add_header('Accept', 'application/json')
request.add_header('Content-Type', 'application/json;charset=UTF-8')
if password_manager:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler(),
url_request.HTTPBasicAuthHandler(password_manager))
else:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler())
resp = opener.open(request, timeout=self._timeout)
statuscode = resp.code
if not hasattr(resp, 'getheader'):
if hasattr(resp.headers, 'getheader'):
resp.getheader = lambda x: resp.headers.getheader(x)
elif hasattr(resp.headers, 'get'):
resp.getheader = lambda x: resp.headers.get(x)
data = resp.read()
try:
if 300 <= statuscode < 304:
return self._request('GET', resp.getheader('location'))
body = data.decode('utf-8').replace('\x00', '').strip()
if 399 < statuscode < 500:
return {'status': statuscode, 'value': body}
content_type = []
if resp.getheader('Content-Type') is not None:
content_type = resp.getheader('Content-Type').split(';')
if not any([x.startswith('image/png') for x in content_type]):
try:
data = utils.load_json(body.strip())
except ValueError:
if 199 < statuscode < 300:
status = ErrorCode.SUCCESS
else:
status = ErrorCode.UNKNOWN_ERROR
return {'status': status, 'value': body.strip()}
assert type(data) is dict, (
'Invalid server response body: %s' % body)
assert 'status' in data, (
'Invalid server response; no status: %s' % body)
# Some of the drivers incorrectly return a response
# with no 'value' field when they should return null.
if 'value' not in data:
data['value'] = None
return data
else:
data = {'status': 0, 'value': body.strip()}
return data
finally:
LOGGER.debug("Finished Request")
resp.close()
| 1 | 12,281 | The else doesn't return anything? | SeleniumHQ-selenium | js |
@@ -112,6 +112,7 @@ function TransactionBuilder(opts) {
if (opts.fee || opts.feeSat) {
this.givenFeeSat = opts.fee ? opts.fee * util.COIN : opts.feeSat;
}
+ if (opts.fee == 0 || opts.feeSat == 0) this.givenFeeSat = 0;
this.remainderOut = opts.remainderOut;
this.signhash = opts.signhash || Transaction.SIGHASH_ALL;
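The hunk above special-cases an explicit zero fee because the original truthiness test skips `fee: 0`. A minimal alternative sketch using explicit undefined checks (an illustration, not the change as submitted):

  if (typeof opts.fee !== 'undefined' || typeof opts.feeSat !== 'undefined') {
    this.givenFeeSat = typeof opts.fee !== 'undefined' ? opts.fee * util.COIN : opts.feeSat;
  }

Note that `_setFeeAndRemainder` in the file below also tests `this.givenFeeSat` for truthiness, so a zero fee only takes effect if that check is likewise made explicit; otherwise the builder falls back to the size-based default fee.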
| 1 | // TransactionBuilder
// ==================
//
// Creates a bitcore Transaction object
//
//
// Synopsis
// --------
// ```
// var tx = (new TransactionBuilder(opts))
// .setUnspent(utxos)
// .setOutputs(outs)
// .sign(keys)
// .build();
//
//
// var builder = (new TransactionBuilder(opts))
// .setUnspent(spent)
// .setOutputs(outs);
//
// // Uncomplete tx (no signed or partially signed)
// var tx = builder.build();
//
// ..later..
//
// builder.sign(keys);
// while ( !builder.isFullySigned() ) {
//
// ... get new keys ...
//
// builder.sign(keys);
// }
//
// var tx = builder.build();
// broadcast(tx.serialize());
//
// //Serialize it and pass it around...
// var string = JSON.stringify(builder.toObj());
// // then...
// var builder = TransactionBuilder.fromObj(JSON.parse(str));
// builder.sign(keys);
// // Also
// var builder2 = TransactionBuilder.fromObj(JSON.parse(str2));
// builder2.merge(builder); // Will merge signatures for p2sh mulsig txs.
//
//
// ```
//
//
//
'use strict';
var Address = require('./Address');
var Script = require('./Script');
var util = require('../util');
var bignum = require('bignum');
var buffertools = require('buffertools');
var networks = require('../networks');
var WalletKey = require('./WalletKey');
var PrivateKey = require('./PrivateKey');
var Key = require('./Key');
var log = require('../util/log');
var Transaction = require('./Transaction');
var FEE_PER_1000B_SAT = parseInt(0.0001 * util.COIN);
var TOOBJ_VERSION = 1;
// Methods
// -------
//
// TransactionBuilder
// ------------------
// Creates a TransactionBuilder instance
// `opts`
// ```
// {
// remainderOut: null,
// fee: 0.001,
// lockTime: null,
// spendUnconfirmed: false,
// signhash: SIGHASH_ALL
// }
// ```
// Amounts are in BTC. Instead of fee and amount, feeSat and amountSat can be given,
// respectively, to provide amounts in satoshis.
//
// If no remainderOut is given and there are remainder coins, the
// address of the first selected input will be used to return the coins. remainderOut has the form:
// ```
// remainderOut = { address: 1xxxxx}
// ```
// or
// ```
// remainderOut = { pubkeys: ['hex1','hex2',...] } for multisig
// ```
function TransactionBuilder(opts) {
opts = opts || {};
this.vanilla = {};
this.vanilla.scriptSig = [];
this.vanilla.opts = JSON.stringify(opts);
// If any default opts is changed, TOOBJ_VERSION should be changed as
// a caution measure.
this.lockTime = opts.lockTime || 0;
this.spendUnconfirmed = opts.spendUnconfirmed || false;
if (opts.fee || opts.feeSat) {
this.givenFeeSat = opts.fee ? opts.fee * util.COIN : opts.feeSat;
}
this.remainderOut = opts.remainderOut;
this.signhash = opts.signhash || Transaction.SIGHASH_ALL;
this.tx = {};
this.inputsSigned = 0;
return this;
}
TransactionBuilder.FEE_PER_1000B_SAT = FEE_PER_1000B_SAT;
TransactionBuilder._scriptForPubkeys = function(out) {
var l = out.pubkeys.length;
var pubKeyBuf = [];
for (var i = 0; i < l; i++) {
pubKeyBuf.push(new Buffer(out.pubkeys[i], 'hex'));
}
return Script.createMultisig(out.nreq, pubKeyBuf);
};
TransactionBuilder._scriptForOut = function(out) {
var ret;
if (out.address)
ret = new Address(out.address).getScriptPubKey();
else if (out.pubkeys || out.nreq || out.nreq > 1)
ret = this._scriptForPubkeys(out);
else
throw new Error('unknown out type');
return ret;
};
TransactionBuilder.infoForP2sh = function(opts, networkName) {
var script = this._scriptForOut(opts);
var hash = util.sha256ripe160(script.getBuffer());
var version = networkName === 'testnet' ?
networks.testnet.P2SHVersion : networks.livenet.P2SHVersion;
var addr = new Address(version, hash);
var addrStr = addr.as('base58');
return {
script: script,
scriptBufHex: script.getBuffer().toString('hex'),
hash: hash,
address: addrStr,
};
};
// setUnspent
// ----------
// Sets the `unspent` outputs available for the transaction. Some (or all)
// of them will be selected to fulfill the transaction's outputs and fee.
// The expected format is:
// ```
// [{
// address: "mqSjTad2TKbPcKQ3Jq4kgCkKatyN44UMgZ",
// txid: "2ac165fa7a3a2b535d106a0041c7568d03b531e58aeccdd3199d7289ab12cfc1",
// scriptPubKey: "76a9146ce4e1163eb18939b1440c42844d5f0261c0338288ac",
// vout: 1,
// amount: 0.01,
// confirmations: 3
// }, ...
// ]
// ```
// This is compatible with insight's utxo API.
// The amount is in BTC (as returned by insight and bitcoind).
// amountSat (instead of amount) can be given to provide the amount in satoshis.
TransactionBuilder.prototype.setUnspent = function(unspent) {
this.vanilla.utxos = JSON.stringify(unspent);
this.utxos = unspent;
return this;
};
TransactionBuilder.prototype._setInputMap = function() {
var inputMap = [];
var l = this.selectedUtxos.length;
for (var i = 0; i < l; i++) {
var utxo = this.selectedUtxos[i];
var scriptBuf = new Buffer(utxo.scriptPubKey, 'hex');
var scriptPubKey = new Script(scriptBuf);
var scriptType = scriptPubKey.classify();
if (scriptType === Script.TX_UNKNOWN)
throw new Error('Unknown scriptPubKey type at:' + i +
' Type:' + scriptPubKey.getRawOutType());
inputMap.push({
address: utxo.address,
scriptPubKey: scriptPubKey,
scriptType: scriptType,
i: i,
});
}
this.inputMap = inputMap;
return this;
};
// getSelectedUnspent
// ------------------
//
// Returns the selected unspent outputs, to be used in the transaction.
TransactionBuilder.prototype.getSelectedUnspent = function() {
return this.selectedUtxos;
};
/* _selectUnspent
* TODO(?): sort sel (at the end) and check if some inputs can be avoided.
* If the initial utxos are sorted, this step would be necessary only if
* utxos were selected from different minConfirmationSteps.
*/
TransactionBuilder.prototype._selectUnspent = function(neededAmountSat) {
if (!this.utxos || !this.utxos.length)
throw new Error('unspent not set');
var minConfirmationSteps = [6, 1];
if (this.spendUnconfirmed) minConfirmationSteps.push(0);
var sel = [],
totalSat = bignum(0),
fulfill = false,
maxConfirmations = null,
l = this.utxos.length;
do {
var minConfirmations = minConfirmationSteps.shift();
for (var i = 0; i < l; i++) {
var u = this.utxos[i];
var c = u.confirmations || 0;
if (c < minConfirmations || (maxConfirmations && c >= maxConfirmations))
continue;
var sat = u.amountSat || util.parseValue(u.amount);
totalSat = totalSat.add(sat);
sel.push(u);
if (totalSat.cmp(neededAmountSat) >= 0) {
fulfill = true;
break;
}
}
maxConfirmations = minConfirmations;
} while (!fulfill && minConfirmationSteps.length);
if (!fulfill)
throw new Error('not enough unspent tx outputs to fulfill totalNeededAmount [SAT]:' +
neededAmountSat);
this.selectedUtxos = sel;
this._setInputMap();
return this;
};
TransactionBuilder.prototype._setInputs = function(txobj) {
var ins = this.selectedUtxos;
var l = ins.length;
var valueInSat = bignum(0);
txobj.ins = [];
for (var i = 0; i < l; i++) {
valueInSat = valueInSat.add(util.parseValue(ins[i].amount));
var txin = {};
txin.s = util.EMPTY_BUFFER;
txin.q = 0xffffffff;
var hash = new Buffer(ins[i].txid, 'hex');
var hashReversed = buffertools.reverse(hash);
var vout = parseInt(ins[i].vout);
var voutBuf = new Buffer(4);
voutBuf.writeUInt32LE(vout, 0);
txin.o = Buffer.concat([hashReversed, voutBuf]);
txobj.ins.push(txin);
}
this.valueInSat = valueInSat;
return this;
};
TransactionBuilder.prototype._setFee = function(feeSat) {
if (typeof this.valueOutSat === 'undefined')
throw new Error('valueOutSat undefined');
var valueOutSat = this.valueOutSat.add(feeSat);
if (this.valueInSat.cmp(valueOutSat) < 0) {
var inv = this.valueInSat.toString();
var ouv = valueOutSat.toString();
throw new Error('transaction input amount is less than outputs: ' +
inv + ' < ' + ouv + ' [SAT]');
}
this.feeSat = feeSat;
return this;
};
TransactionBuilder.prototype._setRemainder = function(txobj, remainderIndex) {
if (typeof this.valueInSat === 'undefined' ||
typeof this.valueOutSat === 'undefined')
throw new Error('valueInSat / valueOutSat undefined');
/* add remainder (without modifying outs[]) */
var remainderSat = this.valueInSat.sub(this.valueOutSat).sub(this.feeSat);
var l = txobj.outs.length;
this.remainderSat = bignum(0);
/*remove old remainder? */
if (l > remainderIndex) {
txobj.outs.pop();
}
if (remainderSat.cmp(0) > 0) {
var remainderOut = this.remainderOut || this.selectedUtxos[0];
var value = util.bigIntToValue(remainderSat);
var script = TransactionBuilder._scriptForOut(remainderOut);
var txout = {
v: value,
s: script.getBuffer(),
};
txobj.outs.push(txout);
this.remainderSat = remainderSat;
}
return this;
};
TransactionBuilder.prototype._setFeeAndRemainder = function(txobj) {
/* starting size estimation */
var size = 500,
maxSizeK, remainderIndex = txobj.outs.length;
do {
/* based on https://en.bitcoin.it/wiki/Transaction_fees */
maxSizeK = parseInt(size / 1000) + 1;
var feeSat = this.givenFeeSat ?
this.givenFeeSat : maxSizeK * FEE_PER_1000B_SAT;
var neededAmountSat = this.valueOutSat.add(feeSat);
this._selectUnspent(neededAmountSat)
._setInputs(txobj)
._setFee(feeSat)
._setRemainder(txobj, remainderIndex);
size = new Transaction(txobj).getSize();
} while (size > (maxSizeK + 1) * 1000);
return this;
};
// setOutputs
// ----------
// Sets the outputs for the transaction. Format is:
// ```
// an array of [{
// address: xx,
// amount:0.001
// },...]
// ```
//
// Note that only some of the unspent outputs will be selected
// to fund the transaction. The selected ones can be checked
// after calling `setOutputs`, with `.getSelectedUnspent`.
// amountSatStr could be used to pass in the amount in satoshis, as a string.
//
TransactionBuilder.prototype.setOutputs = function(outs) {
this.vanilla.outs = JSON.stringify(outs);
var valueOutSat = bignum(0);
var txobj = {};
txobj.version = 1;
txobj.lock_time = this.lockTime || 0;
txobj.ins = [];
txobj.outs = [];
var l = outs.length;
for (var i = 0; i < l; i++) {
var amountSat = outs[i].amountSat || outs[i].amountSatStr ? bignum(outs[i].amountSatStr) : util.parseValue(outs[i].amount);
var value = util.bigIntToValue(amountSat);
var script = TransactionBuilder._scriptForOut(outs[i]);
var txout = {
v: value,
s: script.getBuffer(),
};
txobj.outs.push(txout);
valueOutSat = valueOutSat.add(amountSat);
}
this.valueOutSat = valueOutSat;
this._setFeeAndRemainder(txobj);
this.tx = new Transaction(txobj);
return this;
};
TransactionBuilder._mapKeys = function(keys) {
/* prepare keys */
var walletKeyMap = {};
var l = keys.length;
var wk;
for (var i = 0; i < l; i++) {
var k = keys[i];
if (typeof k === 'string') {
var pk = new PrivateKey(k);
wk = new WalletKey({
network: pk.network()
});
wk.fromObj({
priv: k
});
} else if (k instanceof WalletKey) {
wk = k;
} else {
throw new Error('argument must be an array of strings (WIF format) or WalletKey objects');
}
var addr = wk.storeObj().addr;
walletKeyMap[addr] = wk;
}
return walletKeyMap;
};
TransactionBuilder._signHashAndVerify = function(wk, txSigHash) {
var triesLeft = 10,
sigRaw;
do {
sigRaw = wk.privKey.signSync(txSigHash);
} while (wk.privKey.verifySignatureSync(txSigHash, sigRaw) === false &&
triesLeft--);
if (triesLeft < 0)
throw new Error('could not sign input: verification failed');
return sigRaw;
};
TransactionBuilder.prototype._checkTx = function() {
if (!this.tx || !this.tx.ins || !this.tx.ins.length || !this.tx.outs.length)
throw new Error('tx is not defined');
};
TransactionBuilder.prototype._multiFindKey = function(walletKeyMap, pubKeyHash) {
var wk;
[networks.livenet, networks.testnet].forEach(function(n) {
[n.addressVersion, n.P2SHVersion].forEach(function(v) {
var a = new Address(v, pubKeyHash);
if (!wk && walletKeyMap[a]) {
wk = walletKeyMap[a];
}
});
});
return wk;
};
TransactionBuilder.prototype._findWalletKey = function(walletKeyMap, input) {
var wk;
if (input.address) {
wk = walletKeyMap[input.address];
} else if (input.pubKeyHash) {
wk = this._multiFindKey(walletKeyMap, input.pubKeyHash);
} else if (input.pubKeyBuf) {
var pubKeyHash = util.sha256ripe160(input.pubKeyBuf);
wk = this._multiFindKey(walletKeyMap, pubKeyHash);
} else {
throw new Error('no infomation at input to find keys');
}
return wk;
};
TransactionBuilder.prototype._signPubKey = function(walletKeyMap, input, txSigHash) {
if (this.tx.ins[input.i].s.length > 0) return {};
var wk = this._findWalletKey(walletKeyMap, input);
if (!wk) return;
var sigRaw = TransactionBuilder._signHashAndVerify(wk, txSigHash);
var sigType = new Buffer(1);
sigType[0] = this.signhash;
var sig = Buffer.concat([sigRaw, sigType]);
var scriptSig = new Script();
scriptSig.chunks.push(sig);
scriptSig.updateBuffer();
return {
inputFullySigned: true,
signaturesAdded: 1,
script: scriptSig.getBuffer()
};
};
TransactionBuilder.prototype._signPubKeyHash = function(walletKeyMap, input, txSigHash) {
if (this.tx.ins[input.i].s.length > 0) return {};
var wk = this._findWalletKey(walletKeyMap, input);
if (!wk) return;
var sigRaw = TransactionBuilder._signHashAndVerify(wk, txSigHash);
var sigType = new Buffer(1);
sigType[0] = this.signhash;
var sig = Buffer.concat([sigRaw, sigType]);
var scriptSig = new Script();
scriptSig.chunks.push(sig);
scriptSig.chunks.push(wk.privKey.public);
scriptSig.updateBuffer();
return {
inputFullySigned: true,
signaturesAdded: 1,
script: scriptSig.getBuffer()
};
};
/* FOR TESTING
var _dumpChunks = function (scriptSig, label) {
console.log('## DUMP: ' + label + ' ##');
for(var i=0; i<scriptSig.chunks.length; i++) {
console.log('\tCHUNK ', i, Buffer.isBuffer(scriptSig.chunks[i])
?scriptSig.chunks[i].toString('hex'):scriptSig.chunks[i] );
}
};
*/
TransactionBuilder.prototype._chunkSignedWithKey = function(scriptSig, txSigHash, publicKey) {
var ret;
var k = new Key();
k.public = publicKey;
for (var i = 1; i <= scriptSig.countSignatures(); i++) {
var chunk = scriptSig.chunks[i];
var sigRaw = new Buffer(chunk.slice(0, chunk.length - 1));
if (k.verifySignatureSync(txSigHash, sigRaw)) {
ret = chunk;
}
}
return ret;
};
TransactionBuilder.prototype._getSignatureOrder = function(sigPrio, sigRaw, txSigHash, pubkeys) {
var l = pubkeys.length;
for (var j = 0; j < l; j++) {
var k = new Key();
k.public = new Buffer(pubkeys[j], 'hex');
if (k.verifySignatureSync(txSigHash, sigRaw))
break;
}
return j;
};
TransactionBuilder.prototype._getNewSignatureOrder = function(sigPrio, scriptSig, txSigHash, pubkeys) {
var iPrio;
for (var i = 1; i <= scriptSig.countSignatures(); i++) {
var chunk = scriptSig.chunks[i];
var sigRaw = new Buffer(chunk.slice(0, chunk.length - 1));
iPrio = this._getSignatureOrder(sigPrio, sigRaw, txSigHash, pubkeys);
if (sigPrio <= iPrio) break;
}
return (sigPrio === iPrio ? -1 : i - 1);
};
TransactionBuilder.prototype._chunkIsEmpty = function(chunk) {
return chunk === 0 || // when serializing and back, EMPTY_BUFFER becomes 0
buffertools.compare(chunk, util.EMPTY_BUFFER) === 0;
};
TransactionBuilder.prototype._initMultiSig = function(script) {
var wasUpdated = false;
if (script.chunks[0] !== 0) {
script.prependOp0();
wasUpdated = true;
}
return wasUpdated;
};
TransactionBuilder.prototype._updateMultiSig = function(sigPrio, wk, scriptSig, txSigHash, pubkeys) {
var wasUpdated = this._initMultiSig(scriptSig);
if (this._chunkSignedWithKey(scriptSig, txSigHash, wk.privKey.public))
return null;
// Create signature
var sigRaw = TransactionBuilder._signHashAndVerify(wk, txSigHash);
var sigType = new Buffer(1);
sigType[0] = this.signhash;
var sig = Buffer.concat([sigRaw, sigType]);
// Add signature
var order = this._getNewSignatureOrder(sigPrio, scriptSig, txSigHash, pubkeys);
scriptSig.chunks.splice(order + 1, 0, sig);
scriptSig.updateBuffer();
wasUpdated = true;
return wasUpdated ? scriptSig : null;
};
TransactionBuilder.prototype._signMultiSig = function(walletKeyMap, input, txSigHash) {
var pubkeys = input.scriptPubKey.capture(),
nreq = input.scriptPubKey.chunks[0] - 80, //see OP_2-OP_16
l = pubkeys.length,
originalScriptBuf = this.tx.ins[input.i].s;
var scriptSig = new Script(originalScriptBuf);
var signaturesAdded = 0;
for (var j = 0; j < l && scriptSig.countSignatures() < nreq; j++) {
var wk = this._findWalletKey(walletKeyMap, {
pubKeyBuf: pubkeys[j]
});
if (!wk) continue;
var newScriptSig = this._updateMultiSig(j, wk, scriptSig, txSigHash, pubkeys);
if (newScriptSig) {
scriptSig = newScriptSig;
signaturesAdded++;
}
}
var ret = {
inputFullySigned: scriptSig.countSignatures() === nreq,
signaturesAdded: signaturesAdded,
script: scriptSig.getBuffer(),
};
return ret;
};
var fnToSign = {};
TransactionBuilder.prototype._scriptIsAppended = function(script, scriptToAddBuf) {
var len = script.chunks.length;
if (script.chunks[len - 1] === undefined)
return false;
if (typeof script.chunks[len - 1] === 'number')
return false;
if (buffertools.compare(script.chunks[len - 1], scriptToAddBuf) !== 0)
return false;
return true;
};
TransactionBuilder.prototype._addScript = function(scriptBuf, scriptToAddBuf) {
var s = new Script(scriptBuf);
if (!this._scriptIsAppended(s, scriptToAddBuf)) {
s.chunks.push(scriptToAddBuf);
s.updateBuffer();
}
return s.getBuffer();
};
TransactionBuilder.prototype._getInputForP2sh = function(script, index) {
var scriptType = script.classify();
/* pubKeyHash is needed for TX_PUBKEYHASH and TX_PUBKEY to retrieve the keys. */
var pubKeyHash;
switch (scriptType) {
case Script.TX_PUBKEYHASH:
pubKeyHash = script.captureOne();
break;
case Script.TX_PUBKEY:
var chunk = script.captureOne();
pubKeyHash = util.sha256ripe160(chunk);
}
return {
i: index,
pubKeyHash: pubKeyHash,
scriptPubKey: script,
scriptType: scriptType,
isP2sh: true,
};
};
TransactionBuilder.prototype._p2shInput = function(input) {
if (!this.hashToScriptMap)
throw new Error('hashToScriptMap not set');
var scriptHex = this.hashToScriptMap[input.address];
if (!scriptHex) return;
var scriptBuf = new Buffer(scriptHex, 'hex');
var script = new Script(scriptBuf);
var scriptType = script.classify();
if (!fnToSign[scriptType] || scriptType === Script.TX_SCRIPTHASH)
throw new Error('dont know how to sign p2sh script type:' + script.getRawOutType());
return {
input: this._getInputForP2sh(script, input.i),
txSigHash: this.tx.hashForSignature(script, input.i, this.signhash),
scriptType: script.classify(),
scriptBuf: scriptBuf,
};
};
TransactionBuilder.prototype._signScriptHash = function(walletKeyMap, input, txSigHash) {
var p2sh = this._p2shInput(input);
var ret = fnToSign[p2sh.scriptType].call(this, walletKeyMap, p2sh.input, p2sh.txSigHash);
if (ret && ret.script && ret.signaturesAdded) {
ret.script = this._addScript(ret.script, p2sh.scriptBuf);
}
return ret;
};
fnToSign[Script.TX_PUBKEYHASH] = TransactionBuilder.prototype._signPubKeyHash;
fnToSign[Script.TX_PUBKEY] = TransactionBuilder.prototype._signPubKey;
fnToSign[Script.TX_MULTISIG] = TransactionBuilder.prototype._signMultiSig;
fnToSign[Script.TX_SCRIPTHASH] = TransactionBuilder.prototype._signScriptHash;
// sign
// ----
// Signs a transaction.
// `keys`: an array of strings representing private keys to sign the
// transaction in WIF private key format OR bitcore's `WalletKey` objects
//
// If multiple keys are given, each will be tested against the transaction's
// scriptPubKeys. Only the valid private keys will be used to sign.
// This method is fully compatible with *multisig* transactions.
//
// `.isFullySigned` can be queried to check if the transaction has all the needed
// signatures.
//
//
TransactionBuilder.prototype.sign = function(keys) {
if (!(keys instanceof Array))
throw new Error('parameter should be an array');
this._checkTx();
var tx = this.tx,
ins = tx.ins,
l = ins.length,
walletKeyMap = TransactionBuilder._mapKeys(keys);
for (var i = 0; i < l; i++) {
var input = this.inputMap[i];
var txSigHash = this.tx.hashForSignature(
input.scriptPubKey, i, this.signhash);
var ret = fnToSign[input.scriptType].call(this, walletKeyMap, input, txSigHash);
if (ret && ret.script) {
this.vanilla.scriptSig[i] = ret.script.toString('hex');
tx.ins[i].s = ret.script;
if (ret.inputFullySigned) this.inputsSigned++;
}
}
return this;
};
// setHashToScriptMap
// ------------------
// Needed to set up Address-to-Script maps
// for p2sh transactions. See `.infoForP2sh`
// to generate the input for this call (a usage sketch follows the function below).
//
TransactionBuilder.prototype.setHashToScriptMap = function(hashToScriptMap) {
this.vanilla.hashToScriptMap = JSON.stringify(hashToScriptMap);
this.hashToScriptMap = hashToScriptMap;
return this;
};
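// A hedged usage sketch tying this setter to `.infoForP2sh` (the variable
// names are illustrative, not from the original code): the map keys are p2sh
// addresses and the values are hex-encoded redeem scripts, which is what
// `_p2shInput` looks up while signing.
//
//   var info = TransactionBuilder.infoForP2sh({nreq: 2, pubkeys: [hex1, hex2, hex3]}, 'testnet');
//   var map = {};
//   map[info.address] = info.scriptBufHex;
//   builder.setHashToScriptMap(map).sign(keys);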
// isFullySigned
// -------------
// Checks if the transaction has all the necessary signatures.
//
TransactionBuilder.prototype.isFullySigned = function() {
return this.inputsSigned === this.tx.ins.length;
};
TransactionBuilder.prototype.build = function() {
this._checkTx();
return this.tx;
};
// toObj
// -----
// Returns a plain Javascript object that contains
// the full status of the TransactionBuilder instance,
// suitable for serialization, storage and transmission.
// See `.fromObj`
//
TransactionBuilder.prototype.toObj = function() {
var ret = {
version: TOOBJ_VERSION,
outs: JSON.parse(this.vanilla.outs),
utxos: JSON.parse(this.vanilla.utxos),
opts: JSON.parse(this.vanilla.opts),
scriptSig: this.vanilla.scriptSig,
};
if (this.vanilla.hashToScriptMap)
ret.hashToScriptMap = JSON.parse(this.vanilla.hashToScriptMap);
return ret;
};
TransactionBuilder.prototype._setScriptSig = function(inScriptSig) {
this.vanilla.scriptSig = inScriptSig;
for (var i in inScriptSig) {
this.tx.ins[i].s = new Buffer(inScriptSig[i], 'hex');
var scriptSig = new Script(this.tx.ins[i].s);
if (scriptSig.finishedMultiSig() !== false)
this.inputsSigned++;
}
};
// fromObj
// -------
// Returns a TransactionBuilder instance given
// a plain Javascript object created previously
// with `.toObj`. See `.toObj`.
TransactionBuilder.fromObj = function(data) {
if (data.version !== TOOBJ_VERSION)
throw new Error('Incompatible version at TransactionBuilder fromObj');
var b = new TransactionBuilder(data.opts);
if (data.utxos) {
b.setUnspent(data.utxos);
if (data.hashToScriptMap)
b.setHashToScriptMap(data.hashToScriptMap);
if (data.outs) {
b.setOutputs(data.outs);
if (data.scriptSig) {
b._setScriptSig(data.scriptSig);
}
}
}
return b;
};
TransactionBuilder.prototype._checkMergeability = function(b) {
var toCompare = ['opts', 'hashToScriptMap', 'outs', 'utxos'];
for (var i in toCompare) {
var k = toCompare[i];
if (JSON.stringify(this.vanilla[k]) !== JSON.stringify(b.vanilla[k]))
throw new Error('cannot merge: incompatible builders:' + k)
}
};
// TODO this could be on Script class
TransactionBuilder.prototype._mergeInputSigP2sh = function(input, s0, s1) {
var p2sh = this._p2shInput(input);
var redeemScript = new Script(p2sh.scriptBuf);
var pubkeys = redeemScript.capture();
// Look for differences
var s0keys = {};
var l = pubkeys.length;
for (var j = 0; j < l; j++) {
if (this._chunkSignedWithKey(s0, p2sh.txSigHash, pubkeys[j]))
s0keys[pubkeys[j].toString('hex')] = 1;
}
var diff = [];
for (var j = 0; j < l; j++) {
var chunk = this._chunkSignedWithKey(s1, p2sh.txSigHash, pubkeys[j]);
var pubHex = pubkeys[j].toString('hex');
if (chunk && !s0keys[pubHex]) {
diff.push({
prio: j,
chunk: chunk,
pubHex: pubHex,
});
}
}
// Add signatures
for (var j in diff) {
var newSig = diff[j];
var order = this._getNewSignatureOrder(newSig.prio, s0, p2sh.txSigHash, pubkeys);
s0.chunks.splice(order + 1, 0, newSig.chunk);
}
s0.updateBuffer();
return s0.getBuffer();
};
// TODO: move this to script
TransactionBuilder.prototype._getSighashType = function(sig) {
return sig[sig.length - 1];
};
TransactionBuilder.prototype._checkSignHash = function(s1) {
var l = s1.chunks.length - 1;
for (var i = 0; i < l; i++) {
if (i == 0 && s1.chunks[i] === 0)
continue;
if (this._getSighashType(s1.chunks[i]) !== this.signhash)
throw new Error('signhash type mismatch at merge p2sh');
}
};
// TODO this could be on Script class
TransactionBuilder.prototype._mergeInputSig = function(index, s0buf, s1buf) {
if (buffertools.compare(s0buf, s1buf) === 0)
return s0buf;
var s0 = new Script(s0buf);
var s1 = new Script(s1buf);
var l0 = s0.chunks.length;
var l1 = s1.chunks.length;
var s0map = {};
if (l0 && l1 && ((l0 < 2 && l1 > 2) || (l1 < 2 && l0 > 2)))
throw new Error('TX sig types mismatch in merge');
if ((!l0 && !l1) || (l0 && !l1))
return s0buf;
this._checkSignHash(s1);
if ((!l0 && l1))
return s1buf;
// Get the pubkeys
var input = this.inputMap[index];
var type = input.scriptPubKey.classify();
//p2pubkey or p2pubkeyhash
if (type === Script.TX_PUBKEYHASH || type === Script.TX_PUBKEY) {
var s = new Script(s1buf);
log.debug('Merging two signed inputs type:' +
input.scriptPubKey.getRawOutType() + '. Signatures differs. Using the first version.');
return s0buf;
} else if (type !== Script.TX_SCRIPTHASH) {
// No support for normal multisig or strange txs.
throw new Error('Script type:' + input.scriptPubKey.getRawOutType() + 'not supported at #merge');
}
return this._mergeInputSigP2sh(input, s0, s1);
};
// TODO this could be on Transaction class
TransactionBuilder.prototype._mergeTx = function(tx) {
var v0 = this.tx;
var v1 = tx;
var l = v0.ins.length;
if (l !== v1.ins.length)
throw new Error('TX in length mismatch in merge');
this.inputsSigned = 0;
for (var i = 0; i < l; i++) {
var i0 = v0.ins[i];
var i1 = v1.ins[i];
if (i0.q !== i1.q)
throw new Error('TX sequence ins mismatch in merge. Input:', i);
if (buffertools.compare(i0.o, i1.o) !== 0)
throw new Error('TX .o in mismatch in merge. Input:', i);
i0.s = this._mergeInputSig(i, i0.s, i1.s);
this.vanilla.scriptSig[i] = i0.s.toString('hex');
if (v0.isInputComplete(i)) this.inputsSigned++;
}
};
// clone
// -----
// Clone current TransactionBuilder, regenerate derived fields.
//
TransactionBuilder.prototype.clone = function() {
return new TransactionBuilder.fromObj(this.toObj());
};
// merge
// -----
// Merge two TransactionBuilder objects, merging input signatures.
// This function supports multisig p2sh inputs.
TransactionBuilder.prototype.merge = function(inB) {
//
var b = inB.clone();
this._checkMergeability(b);
// Does this tX have any signature already?
if (this.tx || b.tx) {
if (this.tx.getNormalizedHash().toString('hex') !== b.tx.getNormalizedHash().toString('hex'))
throw new Error('mismatch at TransactionBuilder NTXID');
this._mergeTx(b.tx);
}
};
module.exports = TransactionBuilder;
| 1 | 12,993 | if (typeof (opts.fee) !== 'undefined' || typeof (opts.feeSat) !=='undefined') { | bitpay-bitcore | js |
@@ -38,7 +38,13 @@ type Device struct {
}
func (d *Device) CgroupString() string {
- return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions)
+ var p string
+ if d.Permissions == "" {
+ p = "rwm" // empty permissions is invalid... causes a write invalid argument error upon saving to cgroups
+ } else {
+ p = d.Permissions
+ }
+ return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), p)
}
func (d *Device) Mkdev() int { | 1 | package configs
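A minimal, self-contained sketch of what the defaulting above changes (the import path is assumed from the runc layout; the device numbers are illustrative):

  package main

  import (
  	"fmt"

  	"github.com/opencontainers/runc/libcontainer/configs"
  )

  func main() {
  	d := &configs.Device{Type: 'c', Major: 1, Minor: 3, Permissions: ""}
  	// With the patch this prints "c 1:3 rwm"; without it the empty permissions
  	// yield "c 1:3 " which the devices cgroup rejects with an invalid argument
  	// error on write, as the patch comment notes.
  	fmt.Println(d.CgroupString())
  }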
import (
"fmt"
"os"
)
const (
Wildcard = -1
)
// TODO Windows: This can be factored out in the future
type Device struct {
// Device type, block, char, etc.
Type rune `json:"type"`
// Path to the device.
Path string `json:"path"`
// Major is the device's major number.
Major int64 `json:"major"`
// Minor is the device's minor number.
Minor int64 `json:"minor"`
// Cgroup permissions format, rwm.
Permissions string `json:"permissions"`
// FileMode permission bits for the device.
FileMode os.FileMode `json:"file_mode"`
// Uid of the device.
Uid uint32 `json:"uid"`
// Gid of the device.
Gid uint32 `json:"gid"`
}
func (d *Device) CgroupString() string {
return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions)
}
func (d *Device) Mkdev() int {
return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12))
}
// deviceNumberString converts the device number to a string return result.
func deviceNumberString(number int64) string {
if number == Wildcard {
return "*"
}
return fmt.Sprint(number)
}
| 1 | 9,683 | I not sure we need to change this method's logic. Isn't no perms and invalid config? | opencontainers-runc | go |
@@ -23,7 +23,7 @@ const (
protocolName = "hive"
protocolVersion = "1.0.0"
peersStreamName = "peers"
- messageTimeout = 5 * time.Second // maximum allowed time for a message to be read or written.
+ messageTimeout = 1 * time.Minute // maximum allowed time for a message to be read or written.
maxBatchSize = 50
)
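A hedged alternative sketch (the option and struct fields below are hypothetical, not part of this change): instead of widening the hard-coded constant, the deadline could be injected through Options so operators and tests can tune it, falling back to the package default when unset.

  type Options struct {
  	Streamer       p2p.Streamer
  	AddressBook    addressbook.GetPutter
  	Logger         logging.Logger
  	MessageTimeout time.Duration // zero means "use messageTimeout"
  }

  func New(o Options) *Service {
  	if o.MessageTimeout == 0 {
  		o.MessageTimeout = messageTimeout
  	}
  	return &Service{
  		streamer:       o.Streamer,
  		logger:         o.Logger,
  		addressBook:    o.AddressBook,
  		messageTimeout: o.MessageTimeout, // hypothetical Service field
  	}
  }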
| 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hive
import (
"context"
"fmt"
"time"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/hive/pb"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/swarm"
ma "github.com/multiformats/go-multiaddr"
)
const (
protocolName = "hive"
protocolVersion = "1.0.0"
peersStreamName = "peers"
messageTimeout = 5 * time.Second // maximum allowed time for a message to be read or written.
maxBatchSize = 50
)
type Service struct {
streamer p2p.Streamer
addressBook addressbook.GetPutter
peerHandler func(context.Context, swarm.Address) error
logger logging.Logger
}
type Options struct {
Streamer p2p.Streamer
AddressBook addressbook.GetPutter
Logger logging.Logger
}
func New(o Options) *Service {
return &Service{
streamer: o.Streamer,
logger: o.Logger,
addressBook: o.AddressBook,
}
}
func (s *Service) Protocol() p2p.ProtocolSpec {
return p2p.ProtocolSpec{
Name: protocolName,
Version: protocolVersion,
StreamSpecs: []p2p.StreamSpec{
{
Name: peersStreamName,
Handler: s.peersHandler,
},
},
}
}
func (s *Service) BroadcastPeers(ctx context.Context, addressee swarm.Address, peers ...swarm.Address) error {
max := maxBatchSize
for len(peers) > 0 {
if max > len(peers) {
max = len(peers)
}
if err := s.sendPeers(ctx, addressee, peers[:max]); err != nil {
return err
}
peers = peers[max:]
}
return nil
}
func (s *Service) SetPeerAddedHandler(h func(ctx context.Context, addr swarm.Address) error) {
s.peerHandler = h
}
func (s *Service) sendPeers(ctx context.Context, peer swarm.Address, peers []swarm.Address) error {
stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, peersStreamName)
if err != nil {
return fmt.Errorf("new stream: %w", err)
}
defer stream.Close()
w, _ := protobuf.NewWriterAndReader(stream)
var peersRequest pb.Peers
for _, p := range peers {
addr, found := s.addressBook.Get(p)
if !found {
s.logger.Debugf("Peer not found %s", peer, err)
continue
}
peersRequest.Peers = append(peersRequest.Peers, &pb.BzzAddress{
Overlay: p.Bytes(),
Underlay: addr.String(),
})
}
if err := w.WriteMsg(&peersRequest); err != nil {
return fmt.Errorf("write Peers message: %w", err)
}
return stream.FullClose()
}
func (s *Service) peersHandler(_ context.Context, peer p2p.Peer, stream p2p.Stream) error {
_, r := protobuf.NewWriterAndReader(stream)
var peersReq pb.Peers
if err := r.ReadMsgWithTimeout(messageTimeout, &peersReq); err != nil {
stream.Close()
return fmt.Errorf("read requestPeers message: %w", err)
}
stream.Close()
for _, newPeer := range peersReq.Peers {
addr, err := ma.NewMultiaddr(newPeer.Underlay)
if err != nil {
s.logger.Infof("Skipping peer in response %s: %w", newPeer, err)
continue
}
s.addressBook.Put(swarm.NewAddress(newPeer.Overlay), addr)
if s.peerHandler != nil {
if err := s.peerHandler(context.Background(), swarm.NewAddress(newPeer.Overlay)); err != nil {
return err
}
}
}
return nil
}
| 1 | 9,096 | this is an abysmal amount of time. can you explain under which circumstances you should wait for a minute for a message? | ethersphere-bee | go |
@@ -342,6 +342,8 @@ func (o *deployJobOpts) runtimeConfig(addonsURL string) (*stack.RuntimeConfig, e
AddonsTemplateURL: addonsURL,
AdditionalTags: tags.Merge(o.targetApp.Tags, o.resourceTags),
ServiceDiscoveryEndpoint: endpoint,
+ AccountID: o.targetApp.AccountID,
+ Region: o.targetEnvironment.Region,
}, nil
}
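A hedged sketch of filling these fields in one shared helper (the function name and signature are assumptions, not part of the change), so the job/svc deploy and package code paths stay consistent:

  func baseRuntimeConfig(app *config.Application, env *config.Environment,
  	addonsURL, endpoint string, resourceTags map[string]string) *stack.RuntimeConfig {
  	return &stack.RuntimeConfig{
  		AddonsTemplateURL:        addonsURL,
  		AdditionalTags:           tags.Merge(app.Tags, resourceTags),
  		ServiceDiscoveryEndpoint: endpoint,
  		AccountID:                app.AccountID,
  		Region:                   env.Region,
  	}
  }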
| 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"os"
"strings"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/describe"
"github.com/aws/copilot-cli/internal/pkg/addon"
"github.com/aws/copilot-cli/internal/pkg/exec"
"github.com/aws/copilot-cli/internal/pkg/repository"
"github.com/aws/copilot-cli/internal/pkg/term/log"
awscloudformation "github.com/aws/copilot-cli/internal/pkg/aws/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/aws/ecr"
"github.com/aws/copilot-cli/internal/pkg/aws/s3"
"github.com/aws/copilot-cli/internal/pkg/aws/sessions"
"github.com/aws/copilot-cli/internal/pkg/aws/tags"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation/stack"
"github.com/aws/copilot-cli/internal/pkg/manifest"
"github.com/aws/copilot-cli/internal/pkg/term/color"
termprogress "github.com/aws/copilot-cli/internal/pkg/term/progress"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/term/selector"
"github.com/aws/copilot-cli/internal/pkg/workspace"
"github.com/spf13/cobra"
)
type deployJobOpts struct {
deployWkldVars
store store
ws wsJobDirReader
unmarshal func(in []byte) (manifest.WorkloadManifest, error)
cmd runner
addons templater
appCFN appResourcesGetter
jobCFN cloudformation.CloudFormation
imageBuilderPusher imageBuilderPusher
sessProvider sessionProvider
s3 artifactUploader
envUpgradeCmd actionCommand
endpointGetter endpointGetter
spinner progress
sel wsSelector
prompt prompter
targetApp *config.Application
targetEnvironment *config.Environment
targetJob *config.Workload
imageDigest string
buildRequired bool
}
func newJobDeployOpts(vars deployWkldVars) (*deployJobOpts, error) {
store, err := config.NewStore()
if err != nil {
return nil, fmt.Errorf("new config store: %w", err)
}
ws, err := workspace.New()
if err != nil {
return nil, fmt.Errorf("new workspace: %w", err)
}
prompter := prompt.New()
if err != nil {
return nil, err
}
return &deployJobOpts{
deployWkldVars: vars,
store: store,
ws: ws,
unmarshal: manifest.UnmarshalWorkload,
spinner: termprogress.NewSpinner(log.DiagnosticWriter),
sel: selector.NewWorkspaceSelect(prompter, store, ws),
prompt: prompter,
cmd: exec.NewCmd(),
sessProvider: sessions.NewProvider(),
}, nil
}
// Validate returns an error if the user inputs are invalid.
func (o *deployJobOpts) Validate() error {
if o.appName == "" {
return errNoAppInWorkspace
}
if o.name != "" {
if err := o.validateJobName(); err != nil {
return err
}
}
if o.envName != "" {
if err := o.validateEnvName(); err != nil {
return err
}
}
return nil
}
// Ask prompts the user for any required fields that are not provided.
func (o *deployJobOpts) Ask() error {
if err := o.askJobName(); err != nil {
return err
}
if err := o.askEnvName(); err != nil {
return err
}
return nil
}
// Execute builds and pushes the container image for the job.
func (o *deployJobOpts) Execute() error {
o.imageTag = imageTagFromGit(o.cmd, o.imageTag) // Best effort assign git tag.
env, err := targetEnv(o.store, o.appName, o.envName)
if err != nil {
return err
}
o.targetEnvironment = env
app, err := o.store.GetApplication(o.appName)
if err != nil {
return err
}
o.targetApp = app
job, err := o.store.GetJob(o.appName, o.name)
if err != nil {
return fmt.Errorf("get job configuration: %w", err)
}
o.targetJob = job
if err := o.configureClients(); err != nil {
return err
}
if err := o.envUpgradeCmd.Execute(); err != nil {
return fmt.Errorf(`execute "env upgrade --app %s --name %s": %v`, o.appName, o.targetEnvironment.Name, err)
}
if err := o.configureContainerImage(); err != nil {
return err
}
addonsURL, err := o.pushAddonsTemplateToS3Bucket()
if err != nil {
return err
}
return o.deployJob(addonsURL)
}
// pushAddonsTemplateToS3Bucket generates the addons template for the job and pushes it to S3.
// If the job doesn't have any addons, it returns the empty string and no errors.
// If the job has addons, it returns the URL of the S3 object storing the addons template.
func (o *deployJobOpts) pushAddonsTemplateToS3Bucket() (string, error) {
template, err := o.addons.Template()
if err != nil {
var notFoundErr *addon.ErrAddonsNotFound
if errors.As(err, ¬FoundErr) {
// addons doesn't exist for job, the url is empty.
return "", nil
}
return "", fmt.Errorf("retrieve addons template: %w", err)
}
resources, err := o.appCFN.GetAppResourcesByRegion(o.targetApp, o.targetEnvironment.Region)
if err != nil {
return "", fmt.Errorf("get app resources: %w", err)
}
reader := strings.NewReader(template)
url, err := o.s3.PutArtifact(resources.S3Bucket, fmt.Sprintf(deploy.AddonsCfnTemplateNameFormat, o.name), reader)
if err != nil {
return "", fmt.Errorf("put addons artifact to bucket %s: %w", resources.S3Bucket, err)
}
return url, nil
}
func (o *deployJobOpts) configureClients() error {
defaultSessEnvRegion, err := o.sessProvider.DefaultWithRegion(o.targetEnvironment.Region)
if err != nil {
return fmt.Errorf("create ECR session with region %s: %w", o.targetEnvironment.Region, err)
}
envSession, err := o.sessProvider.FromRole(o.targetEnvironment.ManagerRoleARN, o.targetEnvironment.Region)
if err != nil {
return fmt.Errorf("assuming environment manager role: %w", err)
}
// ECR client against tools account profile AND target environment region
repoName := fmt.Sprintf("%s/%s", o.appName, o.name)
registry := ecr.New(defaultSessEnvRegion)
o.imageBuilderPusher, err = repository.New(repoName, registry)
if err != nil {
return fmt.Errorf("initiate image builder pusher: %w", err)
}
o.s3 = s3.New(defaultSessEnvRegion)
// CF client against env account profile AND target environment region
o.jobCFN = cloudformation.New(envSession)
o.endpointGetter, err = describe.NewEnvDescriber(describe.NewEnvDescriberConfig{
App: o.appName,
Env: o.envName,
ConfigStore: o.store,
})
if err != nil {
return fmt.Errorf("initiate environment describer: %w", err)
}
addonsSvc, err := addon.New(o.name)
if err != nil {
return fmt.Errorf("initiate addons service: %w", err)
}
o.addons = addonsSvc
// client to retrieve an application's resources created with CloudFormation
defaultSess, err := o.sessProvider.Default()
if err != nil {
return fmt.Errorf("create default session: %w", err)
}
o.appCFN = cloudformation.New(defaultSess)
cmd, err := newEnvUpgradeOpts(envUpgradeVars{
appName: o.appName,
name: o.targetEnvironment.Name,
})
if err != nil {
return fmt.Errorf("new env upgrade command: %v", err)
}
o.envUpgradeCmd = cmd
return nil
}
func (o *deployJobOpts) configureContainerImage() error {
job, err := o.manifest()
if err != nil {
return err
}
required, err := manifest.JobDockerfileBuildRequired(job)
if err != nil {
return err
}
if !required {
return nil
}
// If it is built from local Dockerfile, build and push to the ECR repo.
buildArg, err := o.dfBuildArgs(job)
if err != nil {
return err
}
digest, err := o.imageBuilderPusher.BuildAndPush(exec.NewDockerCommand(), buildArg)
if err != nil {
return fmt.Errorf("build and push image: %w", err)
}
o.imageDigest = digest
o.buildRequired = true
return nil
}
func (o *deployJobOpts) dfBuildArgs(job interface{}) (*exec.BuildArguments, error) {
copilotDir, err := o.ws.CopilotDirPath()
if err != nil {
return nil, fmt.Errorf("get copilot directory: %w", err)
}
return buildArgs(o.name, o.imageTag, copilotDir, job)
}
func (o *deployJobOpts) deployJob(addonsURL string) error {
conf, err := o.stackConfiguration(addonsURL)
if err != nil {
return err
}
if err := o.jobCFN.DeployService(os.Stderr, conf, awscloudformation.WithRoleARN(o.targetEnvironment.ExecutionRoleARN)); err != nil {
return fmt.Errorf("deploy job: %w", err)
}
log.Successf("Deployed %s.\n", color.HighlightUserInput(o.name))
return nil
}
func (o *deployJobOpts) stackConfiguration(addonsURL string) (cloudformation.StackConfiguration, error) {
mft, err := o.manifest()
if err != nil {
return nil, err
}
rc, err := o.runtimeConfig(addonsURL)
if err != nil {
return nil, err
}
var conf cloudformation.StackConfiguration
switch t := mft.(type) {
case *manifest.ScheduledJob:
conf, err = stack.NewScheduledJob(t, o.targetEnvironment.Name, o.targetEnvironment.App, *rc)
default:
return nil, fmt.Errorf("unknown manifest type %T while creating the CloudFormation stack", t)
}
if err != nil {
return nil, fmt.Errorf("create stack configuration: %w", err)
}
return conf, nil
}
func (o *deployJobOpts) runtimeConfig(addonsURL string) (*stack.RuntimeConfig, error) {
endpoint, err := o.endpointGetter.ServiceDiscoveryEndpoint()
if err != nil {
return nil, err
}
if !o.buildRequired {
return &stack.RuntimeConfig{
AddonsTemplateURL: addonsURL,
AdditionalTags: tags.Merge(o.targetApp.Tags, o.resourceTags),
ServiceDiscoveryEndpoint: endpoint,
}, nil
}
resources, err := o.appCFN.GetAppResourcesByRegion(o.targetApp, o.targetEnvironment.Region)
if err != nil {
return nil, fmt.Errorf("get application %s resources from region %s: %w", o.targetApp.Name, o.targetEnvironment.Region, err)
}
repoURL, ok := resources.RepositoryURLs[o.name]
if !ok {
return nil, &errRepoNotFound{
wlName: o.name,
envRegion: o.targetEnvironment.Region,
appAccountID: o.targetApp.AccountID,
}
}
return &stack.RuntimeConfig{
Image: &stack.ECRImage{
RepoURL: repoURL,
ImageTag: o.imageTag,
Digest: o.imageDigest,
},
AddonsTemplateURL: addonsURL,
AdditionalTags: tags.Merge(o.targetApp.Tags, o.resourceTags),
ServiceDiscoveryEndpoint: endpoint,
}, nil
}
func (o *deployJobOpts) manifest() (interface{}, error) {
raw, err := o.ws.ReadJobManifest(o.name)
if err != nil {
return nil, fmt.Errorf("read job %s manifest: %w", o.name, err)
}
mft, err := o.unmarshal(raw)
if err != nil {
return nil, fmt.Errorf("unmarshal job %s manifest: %w", o.name, err)
}
envMft, err := mft.ApplyEnv(o.envName)
if err != nil {
return nil, fmt.Errorf("apply environment %s override: %s", o.envName, err)
}
return envMft, nil
}
// RecommendedActions returns follow-up actions the user can take after successfully executing the command.
func (o *deployJobOpts) RecommendedActions() []string {
return nil
}
func (o *deployJobOpts) validateJobName() error {
names, err := o.ws.JobNames()
if err != nil {
return fmt.Errorf("list jobs in the workspace: %w", err)
}
for _, name := range names {
if o.name == name {
return nil
}
}
return fmt.Errorf("job %s not found in the workspace", color.HighlightUserInput(o.name))
}
func (o *deployJobOpts) validateEnvName() error {
if _, err := targetEnv(o.store, o.appName, o.envName); err != nil {
return err
}
return nil
}
func (o *deployJobOpts) askJobName() error {
if o.name != "" {
return nil
}
name, err := o.sel.Job("Select a job from your workspace", "")
if err != nil {
return fmt.Errorf("select job: %w", err)
}
o.name = name
return nil
}
func (o *deployJobOpts) askEnvName() error {
if o.envName != "" {
return nil
}
name, err := o.sel.Environment("Select an environment", "", o.appName)
if err != nil {
return fmt.Errorf("select environment: %w", err)
}
o.envName = name
return nil
}
// buildJobDeployCmd builds the `job deploy` subcommand.
func buildJobDeployCmd() *cobra.Command {
vars := deployWkldVars{}
cmd := &cobra.Command{
Use: "deploy",
Short: "Deploys a job to an environment.",
Long: `Deploys a job to an environment.`,
Example: `
Deploys a job named "report-gen" to a "test" environment.
/code $ copilot job deploy --name report-gen --env test
Deploys a job with additional resource tags.
/code $ copilot job deploy --resource-tags source/revision=bb133e7,deployment/initiator=manual`,
RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
opts, err := newJobDeployOpts(vars)
if err != nil {
return err
}
if err := opts.Validate(); err != nil {
return err
}
if err := opts.Ask(); err != nil {
return err
}
if err := opts.Execute(); err != nil {
return err
}
return nil
}),
}
cmd.Flags().StringVarP(&vars.appName, appFlag, appFlagShort, tryReadingAppName(), appFlagDescription)
cmd.Flags().StringVarP(&vars.name, nameFlag, nameFlagShort, "", jobFlagDescription)
cmd.Flags().StringVarP(&vars.envName, envFlag, envFlagShort, "", envFlagDescription)
cmd.Flags().StringVar(&vars.imageTag, imageTagFlag, "", imageTagFlagDescription)
cmd.Flags().StringToStringVar(&vars.resourceTags, resourceTagsFlag, nil, resourceTagsFlagDescription)
return cmd
}
| 1 | 18,497 | Can we add this code to `job_package.go` and `svc_package.go`? | aws-copilot-cli | go |
@@ -273,7 +273,9 @@ func CreatePhysicalNode(id NodeID, spec PhysicalProcedureSpec) *PhysicalPlanNode
}
}
-const NextPlanNodeIDKey = "NextPlanNodeID"
+type nodeIDKey string
+
+const NextPlanNodeIDKey nodeIDKey = "NextPlanNodeID"
func CreateUniquePhysicalNode(ctx context.Context, prefix string, spec PhysicalProcedureSpec) *PhysicalPlanNode {
if value := ctx.Value(NextPlanNodeIDKey); value != nil { | 1 | package plan
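A short, runnable illustration of why the hunk above defines a private key type (only the typed-key pattern comes from the change; the rest is illustrative): a value stored under a defined key type cannot be read back, or accidentally overwritten, through a plain string key from another package.

  package main

  import (
  	"context"
  	"fmt"
  )

  type nodeIDKey string

  const NextPlanNodeIDKey nodeIDKey = "NextPlanNodeID"

  func main() {
  	ctx := context.WithValue(context.Background(), NextPlanNodeIDKey, 42)
  	fmt.Println(ctx.Value(NextPlanNodeIDKey)) // 42
  	fmt.Println(ctx.Value("NextPlanNodeID"))  // <nil>: the untyped string is a different key
  }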
import (
"context"
"fmt"
"math"
"github.com/influxdata/flux/interpreter"
)
// PhysicalPlanner performs transforms a logical plan to a physical plan,
// by applying any registered physical rules.
type PhysicalPlanner interface {
Plan(ctx context.Context, lplan *Spec) (*Spec, error)
}
// NewPhysicalPlanner creates a new physical plan with the specified options.
// The new plan will be configured to apply any physical rules that have been registered.
func NewPhysicalPlanner(options ...PhysicalOption) PhysicalPlanner {
pp := &physicalPlanner{
heuristicPlanner: newHeuristicPlanner(),
defaultMemoryLimit: math.MaxInt64,
}
rules := make([]Rule, len(ruleNameToPhysicalRule))
i := 0
for _, v := range ruleNameToPhysicalRule {
rules[i] = v
i++
}
pp.addRules(rules...)
pp.addRules(physicalConverterRule{})
// Options may add or remove rules, so process them after we've
// added registered rules.
for _, opt := range options {
opt.apply(pp)
}
return pp
}
func (pp *physicalPlanner) Plan(ctx context.Context, spec *Spec) (*Spec, error) {
transformedSpec, err := pp.heuristicPlanner.Plan(ctx, spec)
if err != nil {
return nil, err
}
// Compute time bounds for nodes in the plan
if err := transformedSpec.BottomUpWalk(ComputeBounds); err != nil {
return nil, err
}
// Set all default and/or registered trigger specs
if err := transformedSpec.TopDownWalk(SetTriggerSpec); err != nil {
return nil, err
}
// Ensure that the plan is valid
if !pp.disableValidation {
err := transformedSpec.CheckIntegrity()
if err != nil {
return nil, err
}
err = validatePhysicalPlan(transformedSpec)
if err != nil {
return nil, err
}
}
// Update memory quota
if transformedSpec.Resources.MemoryBytesQuota == 0 {
transformedSpec.Resources.MemoryBytesQuota = pp.defaultMemoryLimit
}
// Update concurrency quota
if transformedSpec.Resources.ConcurrencyQuota == 0 {
transformedSpec.Resources.ConcurrencyQuota = len(transformedSpec.Roots)
}
return transformedSpec, nil
}
func validatePhysicalPlan(plan *Spec) error {
err := plan.BottomUpWalk(func(pn Node) error {
if validator, ok := pn.ProcedureSpec().(PostPhysicalValidator); ok {
return validator.PostPhysicalValidate(pn.ID())
}
ppn, ok := pn.(*PhysicalPlanNode)
if !ok {
return fmt.Errorf("invalid physical query plan; found logical operation \"%v\"", pn.ID())
}
if ppn.TriggerSpec == nil {
return fmt.Errorf("invalid physical query plan; trigger spec not set on \"%v\"", ppn.id)
}
return nil
})
return err
}
type physicalPlanner struct {
*heuristicPlanner
defaultMemoryLimit int64
disableValidation bool
}
// PhysicalOption is an option to configure the behavior of the physical planner.
type PhysicalOption interface {
apply(*physicalPlanner)
}
type physicalOption func(*physicalPlanner)
func (opt physicalOption) apply(p *physicalPlanner) {
opt(p)
}
// WithDefaultMemoryLimit sets the default memory limit for plans generated by the planner.
// If the query spec explicitly sets a memory limit, that limit is used instead of the default.
func WithDefaultMemoryLimit(memBytes int64) PhysicalOption {
return physicalOption(func(p *physicalPlanner) {
p.defaultMemoryLimit = memBytes
})
}
// OnlyPhysicalRules produces a physical plan option that forces only a particular set of rules to be applied.
func OnlyPhysicalRules(rules ...Rule) PhysicalOption {
return physicalOption(func(pp *physicalPlanner) {
pp.clearRules()
// Always add physicalConverterRule. It doesn't change the plan but only converts nodes to physical.
// This is required for some pieces to work on the physical plan (e.g. SetTriggerSpec).
pp.addRules(physicalConverterRule{})
pp.addRules(rules...)
})
}
func RemovePhysicalRules(rules ...string) PhysicalOption {
return physicalOption(func(pp *physicalPlanner) {
pp.removeRules(rules...)
})
}
// DisableValidation disables validation in the physical planner.
func DisableValidation() PhysicalOption {
return physicalOption(func(p *physicalPlanner) {
p.disableValidation = true
})
}
// physicalConverterRule rewrites logical nodes that have a ProcedureSpec that implements
// PhysicalProcedureSpec as a physical node. For operations that have a 1:1 relationship
// between their physical and logical operations, this is the default behavior.
type physicalConverterRule struct {
}
func (physicalConverterRule) Name() string {
return "physicalConverterRule"
}
func (physicalConverterRule) Pattern() Pattern {
return Any()
}
func (physicalConverterRule) Rewrite(ctx context.Context, pn Node) (Node, bool, error) {
if _, ok := pn.(*PhysicalPlanNode); ok {
// Already converted
return pn, false, nil
}
ln := pn.(*LogicalNode)
pspec, ok := ln.Spec.(PhysicalProcedureSpec)
if !ok {
// A different rule will do the conversion
return pn, false, nil
}
newNode := PhysicalPlanNode{
bounds: ln.bounds,
id: ln.id,
Spec: pspec,
Source: ln.Source,
}
ReplaceNode(pn, &newNode)
return &newNode, true, nil
}
// PhysicalProcedureSpec is similar to its logical counterpart but must provide a method to determine cost.
type PhysicalProcedureSpec interface {
Kind() ProcedureKind
Copy() ProcedureSpec
Cost(inStats []Statistics) (cost Cost, outStats Statistics)
}
// PhysicalPlanNode represents a physical operation in a plan.
type PhysicalPlanNode struct {
edges
bounds
id NodeID
Spec PhysicalProcedureSpec
Source []interpreter.StackEntry
// The trigger spec defines how and when a transformation
// sends its tables to downstream operators
TriggerSpec TriggerSpec
// The attributes required from inputs to this node
RequiredAttrs []PhysicalAttributes
// The attributes provided to consumers of this node's output
OutputAttrs PhysicalAttributes
}
// ID returns a human-readable id for this plan node.
func (ppn *PhysicalPlanNode) ID() NodeID {
return ppn.id
}
// ProcedureSpec returns the procedure spec for this plan node.
func (ppn *PhysicalPlanNode) ProcedureSpec() ProcedureSpec {
return ppn.Spec
}
func (ppn *PhysicalPlanNode) ReplaceSpec(newSpec ProcedureSpec) error {
physSpec, ok := newSpec.(PhysicalProcedureSpec)
if !ok {
return fmt.Errorf("couldn't replace ProcedureSpec for physical plan node \"%v\"", ppn.ID())
}
ppn.Spec = physSpec
return nil
}
// Kind returns the procedure kind for this plan node.
func (ppn *PhysicalPlanNode) Kind() ProcedureKind {
return ppn.Spec.Kind()
}
func (ppn *PhysicalPlanNode) CallStack() []interpreter.StackEntry {
return ppn.Source
}
func (ppn *PhysicalPlanNode) ShallowCopy() Node {
newNode := new(PhysicalPlanNode)
newNode.edges = ppn.edges.shallowCopy()
newNode.id = ppn.id + "_copy"
// TODO: the type assertion below... is it needed?
newNode.Spec = ppn.Spec.Copy().(PhysicalProcedureSpec)
return newNode
}
// Cost provides the self-cost (i.e., does not include the cost of its predecessors) for
// this plan node. Caller must provide statistics of predecessors to this node.
func (ppn *PhysicalPlanNode) Cost(inStats []Statistics) (cost Cost, outStats Statistics) {
return ppn.Spec.Cost(inStats)
}
// PhysicalAttributes encapsulates any physical attributes of the result produced
// by a physical plan node, such as collation, etc.
type PhysicalAttributes struct {
}
// CreatePhysicalNode creates a single physical plan node from a procedure spec.
// The newly created physical node has no incoming or outgoing edges.
func CreatePhysicalNode(id NodeID, spec PhysicalProcedureSpec) *PhysicalPlanNode {
return &PhysicalPlanNode{
id: id,
Spec: spec,
}
}
const NextPlanNodeIDKey = "NextPlanNodeID"
func CreateUniquePhysicalNode(ctx context.Context, prefix string, spec PhysicalProcedureSpec) *PhysicalPlanNode {
if value := ctx.Value(NextPlanNodeIDKey); value != nil {
nextNodeID := value.(*int)
id := NodeID(fmt.Sprintf("%s%d", prefix, *nextNodeID))
*nextNodeID++
return CreatePhysicalNode(id, spec)
}
return CreatePhysicalNode(NodeID(prefix), spec)
}
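// Illustrative usage sketch (not part of the original source): callers that want
// unique node ids seed the context with a pointer to an int counter under
// NextPlanNodeIDKey, e.g.
//
//	nextID := 0
//	ctx := context.WithValue(context.Background(), NextPlanNodeIDKey, &nextID)
//	node := CreateUniquePhysicalNode(ctx, "merge", spec) // id "merge0", counter becomes 1
//
// Without that value in the context, the prefix alone is used as the node id.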
// PostPhysicalValidator provides an interface that can be implemented by PhysicalProcedureSpecs for any
// validation checks to be performed post-physical planning.
type PostPhysicalValidator interface {
PostPhysicalValidate(id NodeID) error
}
| 1 | 16,192 | While you're here, can you change the casing on this so it is unexported? `nextPlanNodeIDKey` instead. | influxdata-flux | go |
@@ -1,4 +1,5 @@
-// +build proto
+// +build !bootstrap
+
// Contains functions related to dispatching work to remote processes.
// Right now those processes must be on the same box because they use
// the local temporary directories, but in the future this might form | 1 | // +build proto
// Contains functions related to dispatching work to remote processes.
// Right now those processes must be on the same box because they use
// the local temporary directories, but in the future this might form
// a foundation for doing real distributed work.
package build
import (
"encoding/binary"
"fmt"
"io"
"os/exec"
"path"
"strings"
"sync"
"github.com/golang/protobuf/proto"
"github.com/google/shlex"
pb "build/proto/worker"
"core"
)
// A workerServer is the structure we use to maintain information about a remote work server.
type workerServer struct {
requests chan *pb.BuildRequest
responses map[string]chan *pb.BuildResponse
responseMutex sync.Mutex
process *exec.Cmd
closing bool
}
// workerMap contains all the remote workers we've started so far.
var workerMap = map[string]*workerServer{}
var workerMutex sync.Mutex
// buildMaybeRemotely builds a target, either sending it to a remote worker if needed,
// or locally if not.
func buildMaybeRemotely(state *core.BuildState, target *core.BuildTarget, inputHash []byte) ([]byte, error) {
worker, workerArgs, localCmd := workerCommandAndArgs(target)
if worker == "" {
return runBuildCommand(state, target, localCmd, inputHash)
}
// The scheme here is pretty minimal; remote workers currently have quite a bit less info than
// local ones get. Over time we'll probably evolve it to add more information.
opts, err := shlex.Split(workerArgs)
if err != nil {
return nil, err
}
log.Debug("Sending remote build request to %s; opts %s", worker, workerArgs)
resp, err := buildRemotely(state.Config, worker, &pb.BuildRequest{
Rule: target.Label.String(),
Labels: target.Labels,
TempDir: path.Join(core.RepoRoot, target.TmpDir()),
Srcs: target.AllSourcePaths(state.Graph),
Opts: opts,
})
if err != nil {
return nil, err
}
out := strings.Join(resp.Messages, "\n")
if !resp.Success {
return nil, fmt.Errorf("Error building target %s: %s", target.Label, out)
}
// Okay, now we might need to do something locally too...
if localCmd != "" {
out2, err := runBuildCommand(state, target, localCmd, inputHash)
return append([]byte(out+"\n"), out2...), err
}
return []byte(out), nil
}
// buildRemotely runs a single build request and returns its response.
func buildRemotely(config *core.Configuration, worker string, req *pb.BuildRequest) (*pb.BuildResponse, error) {
w, err := getOrStartWorker(config, worker)
if err != nil {
return nil, err
}
w.requests <- req
ch := make(chan *pb.BuildResponse, 1)
w.responseMutex.Lock()
w.responses[req.Rule] = ch
w.responseMutex.Unlock()
response := <-ch
return response, nil
}
// getOrStartWorker either retrieves an existing worker process or starts a new one.
func getOrStartWorker(config *core.Configuration, worker string) (*workerServer, error) {
workerMutex.Lock()
defer workerMutex.Unlock()
if w, present := workerMap[worker]; present {
return w, nil
}
// Need to create a new process
cmd := core.ExecCommand(worker)
cmd.Env = core.GeneralBuildEnvironment(config)
stdin, err := cmd.StdinPipe()
if err != nil {
return nil, err
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
cmd.Stderr = &stderrLogger{}
if err := cmd.Start(); err != nil {
return nil, err
}
w := &workerServer{
requests: make(chan *pb.BuildRequest),
responses: map[string]chan *pb.BuildResponse{},
process: cmd,
}
go w.sendRequests(stdin)
go w.readResponses(stdout)
workerMap[worker] = w
return w, nil
}
// sendRequests sends requests to a running worker server.
func (w *workerServer) sendRequests(stdin io.Writer) {
for request := range w.requests {
b, err := proto.Marshal(request)
if err != nil { // This shouldn't really happen
log.Error("Failed to serialise request: %s", err)
continue
}
// Protos can't be streamed so we have to do our own framing.
binary.Write(stdin, binary.LittleEndian, int32(len(b)))
stdin.Write(b)
}
}
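// Illustrative framing example (not part of the original source): a marshalled
// request of 10 bytes is written to the worker's stdin as the little-endian
// int32 length prefix 0x0A 0x00 0x00 0x00 followed by the 10 payload bytes;
// readResponses below reads responses back using the same length-prefixed framing.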
// readResponses reads the responses from a running worker server and dispatches them appropriately.
func (w *workerServer) readResponses(stdout io.Reader) {
var size int32
for {
if err := binary.Read(stdout, binary.LittleEndian, &size); err != nil {
w.Error("Failed to read response: %s", err)
break
}
buf := make([]byte, size)
if _, err := stdout.Read(buf); err != nil {
w.Error("Failed to read response: %s", err)
break
}
response := pb.BuildResponse{}
if err := proto.Unmarshal(buf, &response); err != nil {
w.Error("Error unmarshaling response: %s", err)
continue
}
w.responseMutex.Lock()
ch, present := w.responses[response.Rule]
delete(w.responses, response.Rule)
w.responseMutex.Unlock()
if present {
log.Debug("Got response from remote worker for %s, success: %v", response.Rule, response.Success)
ch <- &response
} else {
w.Error("Couldn't find response channel for %s", response.Rule)
}
}
}
func (w *workerServer) Error(msg string, args ...interface{}) {
if !w.closing {
log.Error(msg, args...)
}
}
// stderrLogger is used to log any errors from our worker tools.
type stderrLogger struct {
buffer []byte
// suppress will silence any further logging messages when set.
suppress bool
}
// Write implements the io.Writer interface
func (l *stderrLogger) Write(msg []byte) (int, error) {
l.buffer = append(l.buffer, msg...)
if len(l.buffer) > 0 && l.buffer[len(l.buffer)-1] == '\n' {
if !l.suppress {
log.Error("Error from remote worker: %s", strings.TrimSpace(string(l.buffer)))
}
l.buffer = nil
}
return len(msg), nil
}
// StopWorkers stops any running worker processes.
func StopWorkers() {
for name, worker := range workerMap {
log.Debug("Killing build worker %s", name)
worker.closing = true // suppress any error messages from worker
if l, ok := worker.process.Stderr.(*stderrLogger); ok {
l.suppress = true // Make sure we don't print anything as they die.
}
worker.process.Process.Kill()
}
}
| 1 | 8,117 | thanks for tidying all these up, as you can tell they've been used pretty arbitrarily in the past to just exclude things from bootstrap... | thought-machine-please | go |
@@ -204,7 +204,7 @@ class Renderer(Exporter):
if (((len(plot) == 1 and not plot.dynamic)
or (len(plot) > 1 and self.holomap is None) or
(plot.dynamic and len(plot.keys[0]) == 0)) or
- not unbound_dimensions(plot.streams, plot.dimensions)):
+ not unbound_dimensions(plot.streams, plot.dimensions, False)):
fmt = fig_formats[0] if self.fig=='auto' else self.fig
else:
fmt = holomap_formats[0] if self.holomap=='auto' else self.holomap | 1 | """
Public API for all plotting renderers supported by HoloViews,
regardless of plotting package or backend.
"""
from __future__ import unicode_literals
from io import BytesIO
import os, base64
from contextlib import contextmanager
import param
from ..core.io import Exporter
from ..core.options import Store, StoreOptions, SkipRendering
from ..core.util import find_file, unicode, unbound_dimensions
from .. import Layout, HoloMap, AdjointLayout
from .widgets import NdWidget, ScrubberWidget, SelectionWidget
from .. import DynamicMap
from . import Plot
from .comms import JupyterComm
from .util import displayable, collate
from param.parameterized import bothmethod
# Tags used when visual output is to be embedded in HTML
IMAGE_TAG = "<img src='{src}' style='max-width:100%; margin: auto; display: block; {css}'/>"
VIDEO_TAG = """
<video controls style='max-width:100%; margin: auto; display: block; {css}'>
<source src='{src}' type='{mime_type}'>
Your browser does not support the video tag.
</video>"""
PDF_TAG = "<iframe src='{src}' style='width:100%; margin: auto; display: block; {css}'></iframe>"
HTML_TAG = "{src}"
HTML_TAGS = {
'base64': 'data:{mime_type};base64,{b64}', # Use to embed data
'svg': IMAGE_TAG,
'png': IMAGE_TAG,
'gif': IMAGE_TAG,
'webm': VIDEO_TAG,
'mp4': VIDEO_TAG,
'pdf': PDF_TAG,
'html': HTML_TAG
}
MIME_TYPES = {
'svg': 'image/svg+xml',
'png': 'image/png',
'gif': 'image/gif',
'webm': 'video/webm',
'mp4': 'video/mp4',
'pdf': 'application/pdf',
'html': None,
'json': None
}
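# Illustrative example (not part of the original source): a PNG rendering is
# embedded by filling HTML_TAGS['base64'] to produce a
# "data:image/png;base64,<encoded bytes>" URI, which is then substituted as the
# src of IMAGE_TAG; Renderer.html below follows exactly this path.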
static_template = """
<html>
<head>
{css}
{js}
</head>
<body>
{html}
</body>
</html>
"""
class Renderer(Exporter):
"""
The job of a Renderer is to turn the plotting state held within
Plot classes into concrete, visual output in the form of the PNG,
SVG, MP4 or WebM formats (among others). Note that a Renderer is a
type of Exporter and must therefore follow the Exporter interface.
The Renderer needs to be able to use the .state property of the
appropriate Plot classes associated with that renderer in order to
generate output. The process of 'drawing' is executed by the Plots
and the Renderer turns the final plotting state into output.
"""
backend = param.String(doc="""
The full, lowercase name of the rendering backend or third
party plotting package used e.g. 'matplotlib' or 'cairo'.""")
dpi=param.Integer(None, allow_None=True, doc="""
The render resolution in dpi (dots per inch)""")
fig = param.ObjectSelector(default='auto', objects=['auto'], doc="""
Output render format for static figures. If None, no figure
rendering will occur. """)
fps=param.Number(20, doc="""
Rendered fps (frames per second) for animated formats.""")
holomap = param.ObjectSelector(default='auto',
objects=['scrubber','widgets', None, 'auto'], doc="""
Output render multi-frame (typically animated) format. If
None, no multi-frame rendering will occur.""")
mode = param.ObjectSelector(default='default', objects=['default'], doc="""
The available rendering modes. As a minimum, the 'default'
mode must be supported.""")
size=param.Integer(100, doc="""
The rendered size as a percentage size""")
widget_mode = param.ObjectSelector(default='embed', objects=['embed', 'live'], doc="""
The widget mode determining whether frames are embedded or generated
'live' when interacting with the widget.""")
css = param.Dict(default={},
doc="Dictionary of CSS attributes and values to apply to HTML output")
info_fn = param.Callable(None, allow_None=True, constant=True, doc="""
Renderers do not support the saving of object info metadata""")
key_fn = param.Callable(None, allow_None=True, constant=True, doc="""
Renderers do not support the saving of object key metadata""")
post_render_hooks = param.Dict(default={'svg':[], 'png':[]}, doc="""
Optional dictionary of hooks that are applied to the rendered
data (according to the output format) before it is returned.
Each hook is passed the rendered data and the object that is
being rendered. These hooks allow post-processing of rendered
data before output is saved to file or displayed.""")
# Defines the valid output formats for each mode.
mode_formats = {'fig': {'default': [None, 'auto']},
'holomap': {'default': [None, 'auto']}}
# Define comms class and message handler for each mode
# The Comm opens a communication channel and the message
# handler defines how the message is processed on the frontend
comms = {'default': (JupyterComm, None)}
# Define appropriate widget classes
widgets = {'scrubber': ScrubberWidget, 'widgets': SelectionWidget}
core_dependencies = {'jQueryUI': {'js': ['https://code.jquery.com/ui/1.10.4/jquery-ui.min.js'],
'css': ['https://code.jquery.com/ui/1.10.4/themes/smoothness/jquery-ui.css']}}
extra_dependencies = {'jQuery': {'js': ['https://code.jquery.com/jquery-2.1.4.min.js']},
'underscore': {'js': ['https://cdnjs.cloudflare.com/ajax/libs/underscore.js/1.8.3/underscore-min.js']},
'require': {'js': ['https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.20/require.min.js']},
'bootstrap': {'css': ['https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css']}}
# Any additional JS and CSS dependencies required by a specific backend
backend_dependencies = {}
def __init__(self, **params):
self.last_plot = None
super(Renderer, self).__init__(**params)
@bothmethod
def get_plot(self_or_cls, obj, renderer=None):
"""
Given a HoloViews Viewable return a corresponding plot instance.
"""
if not isinstance(obj, Plot) and not displayable(obj):
obj = collate(obj)
# Initialize DynamicMaps with first data item
dmaps = obj.traverse(lambda x: x, specs=[DynamicMap])
for dmap in dmaps:
if dmap.sampled:
# Skip initialization until plotting code
continue
if dmap.call_mode == 'key':
dmap[dmap._initial_key()]
else:
try:
next(dmap)
except StopIteration: # Exhausted DynamicMap
raise SkipRendering("DynamicMap generator exhausted.")
if not renderer: renderer = self_or_cls.instance()
if not isinstance(obj, Plot):
obj = Layout.from_values(obj) if isinstance(obj, AdjointLayout) else obj
plot_opts = self_or_cls.plot_options(obj, self_or_cls.size)
plot = self_or_cls.plotting_class(obj)(obj, renderer=renderer,
**plot_opts)
plot.update(0)
else:
plot = obj
return plot
def _validate(self, obj, fmt):
"""
Helper method to be used in the __call__ method to get a
suitable plot or widget object and the appropriate format.
"""
if isinstance(obj, tuple(self.widgets.values())):
return obj, 'html'
plot = self.get_plot(obj, renderer=self)
fig_formats = self.mode_formats['fig'][self.mode]
holomap_formats = self.mode_formats['holomap'][self.mode]
if fmt in ['auto', None]:
if (((len(plot) == 1 and not plot.dynamic)
or (len(plot) > 1 and self.holomap is None) or
(plot.dynamic and len(plot.keys[0]) == 0)) or
not unbound_dimensions(plot.streams, plot.dimensions)):
fmt = fig_formats[0] if self.fig=='auto' else self.fig
else:
fmt = holomap_formats[0] if self.holomap=='auto' else self.holomap
if fmt in self.widgets:
plot = self.get_widget(plot, fmt, display_options={'fps': self.fps})
fmt = 'html'
all_formats = set(fig_formats + holomap_formats)
if fmt not in all_formats:
raise Exception("Format %r not supported by mode %r. Allowed formats: %r"
% (fmt, self.mode, fig_formats + holomap_formats))
self.last_plot = plot
return plot, fmt
def __call__(self, obj, fmt=None):
"""
Render the supplied HoloViews component or plot instance using
the appropriate backend. The output is not a file format but a
suitable, in-memory byte stream together with any suitable
metadata.
"""
plot, fmt = self._validate(obj, fmt)
if plot is None: return
# [Backend specific code goes here to generate data]
data = None
# Example of how post_render_hooks are applied
data = self._apply_post_render_hooks(data, obj, fmt)
# Example of the return format where the first value is the rendered data.
return data, {'file-ext':fmt, 'mime_type':MIME_TYPES[fmt]}
def _apply_post_render_hooks(self, data, obj, fmt):
"""
Apply the post-render hooks to the data.
"""
hooks = self.post_render_hooks.get(fmt,[])
for hook in hooks:
try:
data = hook(data, obj)
except Exception as e:
self.warning("The post_render_hook %r could not be applied:\n\n %s"
% (hook, e))
return data
def html(self, obj, fmt=None, css=None, comm=True, **kwargs):
"""
Renders plot or data structure and wraps the output in HTML.
The comm argument defines whether the HTML output includes
code to initialize a Comm, if the plot supplies one.
"""
plot, fmt = self._validate(obj, fmt)
figdata, _ = self(plot, fmt, **kwargs)
if css is None: css = self.css
if fmt in ['html', 'json']:
return figdata
else:
if fmt == 'svg':
figdata = figdata.encode("utf-8")
elif fmt == 'pdf' and 'height' not in css:
_, h = self.get_size(plot)
css['height'] = '%dpx' % (h*self.dpi*1.15)
if isinstance(css, dict):
css = '; '.join("%s: %s" % (k, v) for k, v in css.items())
else:
raise ValueError("CSS must be supplied as Python dictionary")
b64 = base64.b64encode(figdata).decode("utf-8")
(mime_type, tag) = MIME_TYPES[fmt], HTML_TAGS[fmt]
src = HTML_TAGS['base64'].format(mime_type=mime_type, b64=b64)
html = tag.format(src=src, mime_type=mime_type, css=css)
if comm and plot.comm is not None:
comm, msg_handler = self.comms[self.mode]
msg_handler = msg_handler.format(comms_target=plot.comm.target)
return comm.template.format(init_frame=html,
msg_handler=msg_handler,
comms_target=plot.comm.target)
else:
return html
def static_html(self, obj, fmt=None, template=None):
"""
Generates static HTML with the rendered object in the
supplied format. Allows supplying a template formatting string
with fields to interpolate 'js', 'css' and the main 'html'.
"""
js_html, css_html = self.html_assets()
if template is None: template = static_template
html = self.html(obj, fmt)
return template.format(js=js_html, css=css_html, html=html)
@bothmethod
def get_widget(self_or_cls, plot, widget_type, **kwargs):
if not isinstance(plot, Plot):
plot = self_or_cls.get_plot(plot)
dynamic = plot.dynamic
if widget_type == 'auto':
isuniform = plot.uniform
if not isuniform:
widget_type = 'scrubber'
else:
widget_type = 'widgets'
elif dynamic == 'open': widget_type = 'scrubber'
elif dynamic == 'bounded': widget_type = 'widgets'
elif widget_type == 'widgets' and dynamic == 'open':
raise ValueError('Selection widgets not supported in dynamic open mode')
elif widget_type == 'scrubber' and dynamic == 'bounded':
raise ValueError('Scrubber widget not supported in dynamic bounded mode')
if widget_type in [None, 'auto']:
holomap_formats = self_or_cls.mode_formats['holomap'][self_or_cls.mode]
widget_type = holomap_formats[0] if self_or_cls.holomap=='auto' else self_or_cls.holomap
widget_cls = self_or_cls.widgets[widget_type]
return widget_cls(plot, renderer=self_or_cls.instance(),
embed=self_or_cls.widget_mode == 'embed', **kwargs)
@bothmethod
def export_widgets(self_or_cls, obj, filename, fmt=None, template=None,
json=False, json_path='', **kwargs):
"""
Render and export object as a widget to a static HTML
file. Allows supplying a custom template formatting string
with fields to interpolate 'js', 'css' and the main 'html'
containing the widget. Also provides options to export widget
data to a json file in the supplied json_path (defaults to
current path).
"""
if fmt not in list(self_or_cls.widgets.keys())+['auto', None]:
raise ValueError("Renderer.export_widget may only export "
"registered widget types.")
if not isinstance(obj, NdWidget):
if not isinstance(filename, BytesIO):
filedir = os.path.dirname(filename)
current_path = os.getcwd()
html_path = os.path.abspath(filedir)
rel_path = os.path.relpath(html_path, current_path)
save_path = os.path.join(rel_path, json_path)
else:
save_path = json_path
kwargs['json_save_path'] = save_path
kwargs['json_load_path'] = json_path
widget = self_or_cls.get_widget(obj, fmt, **kwargs)
else:
widget = obj
html = self_or_cls.static_html(widget, fmt, template)
if isinstance(filename, BytesIO):
filename.write(html)
filename.seek(0)
else:
with open(filename, 'w') as f:
f.write(html)
@classmethod
def plotting_class(cls, obj):
"""
Given an object or Element class, return the suitable plotting
class needed to render it with the current renderer.
"""
if isinstance(obj, AdjointLayout) or obj is AdjointLayout:
obj = Layout
if isinstance(obj, type):
element_type = obj
else:
element_type = obj.type if isinstance(obj, HoloMap) else type(obj)
try:
plotclass = Store.registry[cls.backend][element_type]
except KeyError:
raise SkipRendering("No plotting class for {0} "
"found".format(element_type.__name__))
return plotclass
@classmethod
def html_assets(cls, core=True, extras=True, backends=None):
"""
Returns JS and CSS for embedding of widgets.
"""
if backends is None:
backends = [cls.backend] if cls.backend else []
# Get all the widgets and find the set of required js widget files
widgets = [wdgt for r in Renderer.__subclasses__()
for wdgt in r.widgets.values()]
css = list({wdgt.css for wdgt in widgets})
basejs = list({wdgt.basejs for wdgt in widgets})
extensionjs = list({wdgt.extensionjs for wdgt in widgets})
# Join all the js widget code into one string
path = os.path.dirname(os.path.abspath(__file__))
widgetjs = '\n'.join(open(find_file(path, f), 'r').read()
for f in basejs + extensionjs
if f is not None )
widgetcss = '\n'.join(open(find_file(path, f), 'r').read()
for f in css if f is not None)
dependencies = {}
if core:
dependencies.update(cls.core_dependencies)
if extras:
dependencies.update(cls.extra_dependencies)
for backend in backends:
dependencies['backend'] = Store.renderers[backend].backend_dependencies
js_html, css_html = '', ''
for _, dep in sorted(dependencies.items(), key=lambda x: x[0]):
js_data = dep.get('js', [])
if isinstance(js_data, tuple):
for js in js_data:
js_html += '\n<script type="text/javascript">%s</script>' % js
else:
for js in js_data:
js_html += '\n<script src="%s" type="text/javascript"></script>' % js
css_data = dep.get('css', [])
if isinstance(css_data, tuple):
for css in css_data:
css_html += '\n<style>%s</style>' % css
else:
for css in css_data:
css_html += '\n<link rel="stylesheet" href="%s">' % css
js_html += '\n<script type="text/javascript">%s</script>' % widgetjs
css_html += '\n<style>%s</style>' % widgetcss
return unicode(js_html), unicode(css_html)
@classmethod
def plot_options(cls, obj, percent_size):
"""
Given an object and a percentage size (as supplied by the
%output magic) return all the appropriate plot options that
would be used to instantiate a plot class for that element.
Default plot sizes at the plotting class level should be taken
into account.
"""
raise NotImplementedError
@bothmethod
def save(self_or_cls, obj, basename, fmt='auto', key={}, info={}, options=None, **kwargs):
"""
Save a HoloViews object to file, either using an explicitly
supplied format or to the appropriate default.
"""
if info or key:
raise Exception('MPLRenderer does not support saving metadata to file.')
with StoreOptions.options(obj, options, **kwargs):
plot = self_or_cls.get_plot(obj)
if (fmt in list(self_or_cls.widgets.keys())+['auto']) and len(plot) > 1:
with StoreOptions.options(obj, options, **kwargs):
self_or_cls.export_widgets(plot, basename+'.html', fmt)
return
with StoreOptions.options(obj, options, **kwargs):
rendered = self_or_cls(plot, fmt)
if rendered is None: return
(data, info) = rendered
if isinstance(basename, BytesIO):
basename.write(data)
basename.seek(0)
else:
encoded = self_or_cls.encode(rendered)
filename ='%s.%s' % (basename, info['file-ext'])
with open(filename, 'wb') as f:
f.write(encoded)
@bothmethod
def get_size(self_or_cls, plot):
"""
Return the display size associated with a plot before
rendering to any particular format. Used to generate
appropriate HTML display.
Returns a tuple of (width, height) in pixels.
"""
raise NotImplementedError
@classmethod
@contextmanager
def state(cls):
"""
Context manager to handle global state for a backend,
allowing Plot classes to temporarily override that state.
"""
yield
@classmethod
def validate(cls, options):
"""
Validate an options dictionary for the renderer.
"""
return options
@classmethod
def load_nb(cls, inline=True):
"""
Loads any resources required for display of plots
in the Jupyter notebook
"""
| 1 | 15,764 | Again, `no_duplicates=False` would be clearer here... | holoviz-holoviews | py |
@@ -36,7 +36,7 @@ import (
// function will terminate current and further iterations without errors, and also close the returned channel.
// Make sure that you check the second returned parameter from the channel to stop iteration when its value
// is false.
-func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan storage.Descriptor, stop func()) {
+func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan storage.Descriptor, closed <-chan struct{}, stop func()) {
db.metrics.SubscribePull.Inc()
chunkDescriptors := make(chan storage.Descriptor) | 1 | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package localstore
import (
"context"
"errors"
"sync"
"time"
"github.com/ethersphere/bee/pkg/shed"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/syndtr/goleveldb/leveldb"
)
// SubscribePull returns a channel that provides chunk addresses and stored times from pull syncing index.
// Pull syncing index can only be subscribed to a particular proximity order bin. If since
// is not 0, the iteration will start from the since item (the item with binID == since). If until is not 0,
// only chunks stored up to this id will be sent to the channel, and the returned channel will be
// closed. The since-until interval is closed on since side, and closed on until side: [since,until]. Returned stop
// function will terminate current and further iterations without errors, and also close the returned channel.
// Make sure that you check the second returned parameter from the channel to stop iteration when its value
// is false.
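//
// A minimal consumer sketch (illustrative, not part of the original file):
//
//	ch, stop := db.SubscribePull(ctx, bin, since, until)
//	defer stop()
//	for d := range ch {
//		handle(d.Address, d.BinID) // handle is a hypothetical callback
//	}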
func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan storage.Descriptor, stop func()) {
db.metrics.SubscribePull.Inc()
chunkDescriptors := make(chan storage.Descriptor)
trigger := make(chan struct{}, 1)
db.pullTriggersMu.Lock()
if _, ok := db.pullTriggers[bin]; !ok {
db.pullTriggers[bin] = make([]chan struct{}, 0)
}
db.pullTriggers[bin] = append(db.pullTriggers[bin], trigger)
db.pullTriggersMu.Unlock()
// send signal for the initial iteration
trigger <- struct{}{}
stopChan := make(chan struct{})
var stopChanOnce sync.Once
// used to provide information from the iterator to
// stop subscription when until chunk descriptor is reached
var errStopSubscription = errors.New("stop subscription")
db.subscritionsWG.Add(1)
go func() {
defer db.subscritionsWG.Done()
db.metrics.SubscribePullStop.Inc()
// close the returned storage.Descriptor channel at the end to
// signal that the subscription is done
defer close(chunkDescriptors)
// sinceItem is the Item from which the next iteration
// should start. The first iteration starts from the first Item.
var sinceItem *shed.Item
if since > 0 {
sinceItem = &shed.Item{
Address: db.addressInBin(bin).Bytes(),
BinID: since,
}
}
first := true // first iteration flag for SkipStartFromItem
for {
select {
case <-trigger:
// iterate until:
// - last index Item is reached
// - subscription stop is called
// - context is done
db.metrics.SubscribePullIteration.Inc()
iterStart := time.Now()
var count int
err := db.pullIndex.Iterate(func(item shed.Item) (stop bool, err error) {
// until chunk descriptor is sent
// break the iteration
if until > 0 && item.BinID > until {
return true, errStopSubscription
}
select {
case chunkDescriptors <- storage.Descriptor{
Address: swarm.NewAddress(item.Address),
BinID: item.BinID,
}:
if until > 0 && item.BinID == until {
return true, errStopSubscription
}
count++
// set next iteration start item
// when its chunk is successfully sent to channel
sinceItem = &item
return false, nil
case <-stopChan:
// gracefully stop the iteration
// on stop
return true, nil
case <-db.close:
// gracefully stop the iteration
// on database close
return true, nil
case <-ctx.Done():
return true, ctx.Err()
}
}, &shed.IterateOptions{
StartFrom: sinceItem,
// sinceItem was sent as the last Address in the previous
// iterator call, skip it in this one, but not the item with
// the provided since bin id as it should be sent to a channel
SkipStartFromItem: !first,
Prefix: []byte{bin},
})
totalTimeMetric(db.metrics.TotalTimeSubscribePullIteration, iterStart)
if err != nil {
if err == errStopSubscription {
// stop subscription without any errors
// if until is reached
return
}
db.metrics.SubscribePullIterationFailure.Inc()
db.logger.Debugf("localstore pull subscription iteration: bin: %d, since: %d, until: %d: %v", bin, since, until, err)
return
}
if count > 0 {
first = false
}
case <-stopChan:
// terminate the subscription
// on stop
return
case <-db.close:
// terminate the subscription
// on database close
return
case <-ctx.Done():
err := ctx.Err()
if err != nil {
db.logger.Debugf("localstore pull subscription iteration: bin: %d, since: %d, until: %d: %v", bin, since, until, err)
}
return
}
}
}()
stop = func() {
stopChanOnce.Do(func() {
close(stopChan)
})
db.pullTriggersMu.Lock()
defer db.pullTriggersMu.Unlock()
for i, t := range db.pullTriggers[bin] {
if t == trigger {
db.pullTriggers[bin] = append(db.pullTriggers[bin][:i], db.pullTriggers[bin][i+1:]...)
break
}
}
}
return chunkDescriptors, stop
}
// LastPullSubscriptionBinID returns chunk bin id of the latest Chunk
// in pull syncing index for a provided bin. If there are no chunks in
// that bin, 0 value is returned.
func (db *DB) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
db.metrics.LastPullSubscriptionBinID.Inc()
item, err := db.pullIndex.Last([]byte{bin})
if err != nil {
if errors.Is(err, leveldb.ErrNotFound) {
return 0, nil
}
return 0, err
}
return item.BinID, nil
}
// triggerPullSubscriptions is used internally for starting iterations
// on Pull subscriptions for a particular bin. When a new item whose address
// is in a particular bin for the DB's baseKey is added to the pull index,
// this function should be called.
func (db *DB) triggerPullSubscriptions(bin uint8) {
db.pullTriggersMu.RLock()
defer db.pullTriggersMu.RUnlock()
triggers, ok := db.pullTriggers[bin]
if !ok {
return
}
for _, t := range triggers {
select {
case t <- struct{}{}:
default:
}
}
}
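// Illustrative note (not part of the original source): the non-blocking send in
// the select above relies on each trigger channel being created with capacity 1
// in SubscribePull, so an already-pending trigger is coalesced with the new one
// instead of blocking the caller.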
// addressInBin returns an address that is in a specific
// proximity order bin from database base key.
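// For example (illustrative): bin=3 gives b=0 and flips the bit selected by
// the shift 7-3=4 in the first byte, so the returned address shares exactly its
// first three bits with baseKey and differs at the fourth, i.e. it lies at
// proximity order 3 relative to baseKey.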
func (db *DB) addressInBin(bin uint8) swarm.Address {
addr := append([]byte(nil), db.baseKey...)
b := bin / 8
addr[b] = addr[b] ^ (1 << (7 - bin%8))
return swarm.NewAddress(addr)
}
| 1 | 10,069 | shouldnt we stop these routines BEFORE the db closed? | ethersphere-bee | go |
@@ -25,6 +25,7 @@ import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Multimap;
+import org.openqa.selenium.devtools.target.model.SessionId;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.json.JsonInput;
import org.openqa.selenium.remote.http.HttpClient; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.devtools;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.openqa.selenium.json.Json.MAP_TYPE;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Multimap;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.json.JsonInput;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.WebSocket;
import java.io.Closeable;
import java.io.StringReader;
import java.time.Duration;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
public class Connection implements Closeable {
private static final Json JSON = new Json();
private static final AtomicLong NEXT_ID = new AtomicLong(1L);
private final WebSocket socket;
private final Map<Long, Consumer<JsonInput>> methodCallbacks = new LinkedHashMap<>();
private final Multimap<Event<?>, Consumer<?>> eventCallbacks = HashMultimap.create();
public Connection(HttpClient client, String url) {
Objects.requireNonNull(client, "HTTP client must be set.");
Objects.requireNonNull(url, "URL to connect to must be set.");
socket = client.openSocket(new HttpRequest(GET, url), new Listener());
}
public <X> CompletableFuture<X> send(Target.SessionId sessionId, Command<X> command) {
long id = NEXT_ID.getAndIncrement();
CompletableFuture<X> result = new CompletableFuture<>();
methodCallbacks.put(id, input -> {
X value = command.getMapper().apply(input);
result.complete(value);
});
ImmutableMap.Builder<String, Object> serialized = ImmutableMap.builder();
serialized.put("id", id);
serialized.put("method", command.getMethod());
serialized.put("params", command.getParams());
if (sessionId != null) {
serialized.put("sessionId", sessionId);
}
socket.sendText(JSON.toJson(serialized.build()));
return result;
}
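// Illustrative note (not part of the original source): the text frame sent over
// the websocket above is the JSON form of the serialized map, e.g.
// {"id":1,"method":"Page.navigate","params":{...},"sessionId":"..."}; the
// response carrying the same id is routed back through methodCallbacks.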
public <X> X sendAndWait(Target.SessionId sessionId, Command<X> command, Duration timeout) {
try {
return send(sessionId, command).get(timeout.toMillis(), MILLISECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IllegalStateException("Thread has been interrupted", e);
} catch (ExecutionException e) {
Throwable cause = e;
if (e.getCause() != null) {
cause = e.getCause();
}
throw new DevToolsException(cause);
} catch (TimeoutException e) {
throw new org.openqa.selenium.TimeoutException(e);
}
}
public <X> void addListener(Event<X> event, Consumer<X> handler) {
Objects.requireNonNull(event);
Objects.requireNonNull(handler);
eventCallbacks.put(event, handler);
}
@Override
public void close() {
socket.close();
}
private class Listener extends WebSocket.Listener {
@Override
public void onText(CharSequence data) {
// It's kind of gross to decode the data twice, but this lets us get started on something
// that feels nice to users.
// TODO: decode once, and once only
String asString = String.valueOf(data);
Map<String, Object> raw = JSON.toType(asString, MAP_TYPE);
if (raw.get("id") instanceof Number && raw.get("result") != null) {
Consumer<JsonInput> consumer = methodCallbacks.remove(((Number) raw.get("id")).longValue());
if (consumer == null) {
return;
}
try (StringReader reader = new StringReader(asString);
JsonInput input = JSON.newInput(reader)) {
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "result":
consumer.accept(input);
break;
default:
input.skipValue();
}
}
input.endObject();
}
} else if (raw.get("method") instanceof String && raw.get("params") instanceof Map) {
System.out.println("Seen: " + raw);
// TODO: Also only decode once.
eventCallbacks.keySet().stream()
.filter(event -> raw.get("method").equals(event.getMethod()))
.forEach(event -> {
// TODO: This is grossly inefficient. I apologise, and we should fix this.
try (StringReader reader = new StringReader(asString);
JsonInput input = JSON.newInput(reader)) {
Object value = null;
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "params":
value = event.getMapper().apply(input);
break;
default:
input.skipValue();
break;
}
}
input.endObject();
if (value == null) {
// Do nothing.
return;
}
final Object finalValue = value;
for (Consumer<?> action : eventCallbacks.get(event)) {
@SuppressWarnings("unchecked") Consumer<Object> obj = (Consumer<Object>) action;
obj.accept(finalValue);
}
}
});
} else {
System.out.println("Unhandled type: " + data);
}
}
}
}
| 1 | 16,697 | It fills me with endless sadness that we can't use Selenium's own `SessionId` here. | SeleniumHQ-selenium | py |
@@ -88,7 +88,7 @@ public class Docker {
findImage(new ImageNamePredicate(name, tag));
- LOG.info(String.format("Pulling %s:%s", name, tag));
+ LOG.finest(String.format("Pulling %s:%s", name, tag));
HttpRequest request = new HttpRequest(POST, "/images/create");
request.addQueryParameter("fromImage", name); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.docker;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static org.openqa.selenium.json.Json.MAP_TYPE;
import static org.openqa.selenium.remote.http.Contents.string;
import static org.openqa.selenium.remote.http.Contents.utf8String;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
import static org.openqa.selenium.remote.http.HttpMethod.POST;
import com.google.common.reflect.TypeToken;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.json.JsonException;
import org.openqa.selenium.json.JsonOutput;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.logging.Logger;
public class Docker {
private static final Logger LOG = Logger.getLogger(Docker.class.getName());
private static final Json JSON = new Json();
private final Function<HttpRequest, HttpResponse> client;
public Docker(HttpClient client) {
Objects.requireNonNull(client, "Docker HTTP client must be set.");
this.client = req -> {
try {
HttpResponse resp = client.execute(req);
if (resp.getStatus() < 200 || resp.getStatus() > 299) {
String value = string(resp);
try {
Object obj = JSON.toType(value, Object.class);
if (obj instanceof Map) {
Map<?, ?> map = (Map<?, ?>) obj;
String message = map.get("message") instanceof String ?
(String) map.get("message") :
value;
throw new RuntimeException(message);
}
throw new RuntimeException(value);
} catch (JsonException e) {
throw new RuntimeException(value);
}
}
return resp;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
};
}
public Image pull(String name, String tag) {
Objects.requireNonNull(name);
Objects.requireNonNull(tag);
findImage(new ImageNamePredicate(name, tag));
LOG.info(String.format("Pulling %s:%s", name, tag));
HttpRequest request = new HttpRequest(POST, "/images/create");
request.addQueryParameter("fromImage", name);
request.addQueryParameter("tag", tag);
client.apply(request);
LOG.info(String.format("Pull of %s:%s complete", name, tag));
return findImage(new ImageNamePredicate(name, tag))
.orElseThrow(() -> new DockerException(
String.format("Cannot find image matching: %s:%s", name, tag)));
}
public List<Image> listImages() {
LOG.fine("Listing images");
HttpResponse response = client.apply(new HttpRequest(GET, "/images/json"));
List<ImageSummary> images =
JSON.toType(string(response), new TypeToken<List<ImageSummary>>() {}.getType());
return images.stream()
.map(Image::new)
.collect(toImmutableList());
}
public Optional<Image> findImage(Predicate<Image> filter) {
Objects.requireNonNull(filter);
LOG.fine("Finding image: " + filter);
return listImages().stream()
.filter(filter)
.findFirst();
}
public Container create(ContainerInfo info) {
StringBuilder json = new StringBuilder();
try (JsonOutput output = JSON.newOutput(json)) {
output.setPrettyPrint(false);
output.write(info);
}
LOG.info("Creating container: " + json);
HttpRequest request = new HttpRequest(POST, "/containers/create");
request.setContent(utf8String(json));
HttpResponse response = client.apply(request);
Map<String, Object> toRead = JSON.toType(string(response), MAP_TYPE);
return new Container(client, new ContainerId((String) toRead.get("Id")));
}
}
| 1 | 16,460 | This will always need to be displayed to users. | SeleniumHQ-selenium | rb |
@@ -1,6 +1,8 @@
class CartDecorator < Draper::Decorator
delegate_all
+ CUSTOM_TEMPLATES = %w(navigator whsc)
+
def total_price
price = object.cart_items.reduce(0) do |sum,citem| sum + citem.quantity * citem.price end
Float("%0.02f" % price) | 1 | class CartDecorator < Draper::Decorator
delegate_all
def total_price
price = object.cart_items.reduce(0) { |sum, citem| sum + citem.quantity * citem.price }
Float("%0.02f" % price)
end
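# Illustrative example (not from the original source): a cart with items priced
# 1.25 x 2 and 3.499 x 1 sums to 5.999, which "%0.02f" rounds to "6.00" before
# Float() converts it back to 6.0.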
def number_approved
object.approved_approvals.count
end
def total_approvers
object.approver_approvals.count
end
def approvals_by_status
object.approver_approvals.order(
# http://stackoverflow.com/a/6332081/358804
<<-SQL
CASE status
WHEN 'approved' THEN 1
WHEN 'rejected' THEN 2
WHEN 'pending' THEN 3
ELSE 4
END
SQL
)
end
def approvals_in_list_order
if object.flow == 'linear'
object.ordered_approvals
else
self.approvals_by_status
end
end
def display_status
if cart.status == 'pending'
'pending approval'
else
cart.status
end
end
def generate_status_message
if self.all_approvals_received?
completed_status_message
else
progress_status_message
end
end
def completed_status_message
"All #{number_approved} of #{total_approvers} approvals have been received. Please move forward with the purchase of Cart ##{object.external_id}."
end
def progress_status_message
"#{number_approved} of #{total_approvers} approved."
end
def cart_template_name
if self.getProp('origin') == 'navigator'
'navigator_cart'
else
'cart_mail'
end
end
def prefix_template_name
if self.getProp('origin') == 'navigator'
'navigator_prefix'
else
nil
end
end
end
| 1 | 12,231 | Maybe we make an `ORIGINS` constant on the Cart model instead? | 18F-C2 | rb |
@@ -115,7 +115,7 @@ const (
// a) Add 'authorizationConfig', 'transitEncryption' and 'transitEncryptionPort' to 'taskresource.volume.EFSVolumeConfig'
// b) Add 'pauseContainerPID' field to 'taskresource.volume.VolumeResource'
// 28) Add 'envfile' field to 'resources'
- // 29) Add 'ExecCommandAgentMetadata' field to 'apicontainer.Container'
+ // 29) Add 'ManagedAgentsUnsafe' field to 'apicontainer.Container'
ECSDataVersion = 29
| 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package statemanager implements simple constructs for saving and restoring
// state from disk.
// It provides the interface for a StateManager which can read/write arbitrary
// json data from/to disk.
package statemanager
import (
"encoding/json"
"errors"
"os"
"strconv"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/metrics"
"github.com/cihub/seelog"
)
/*
DEPRECATED: state manager is no longer used to store agent state and it remains here
only for backward compatibility purposes (loading state from an old agent that uses
the state manager - see agent/app/data.go, so do not remove it although it's deprecated).
You no longer need to update ECSDataVersion when changing data structures
in the agent.
Agent now uses boltDB to store its state. See github.com/aws/amazon-ecs-agent/agent/data
package for the new data persistent interface and use that if you need to deal with data persistence.
*/
const (
// ECSDataVersion is the current version of saved data. Any backwards or
// forwards incompatible changes to the data-format should increment this number
// and retain the ability to read old data versions.
// Version changes:
// 1) initial
// 2)
// a) Add 'ACSSeqNum' top level field (backwards compatible; technically
// forwards compatible but could cause resource constraint violations)
// b) remove 'DEAD', 'UNKNOWN' state from ever being marshalled (backward and
// forward compatible)
// 3) Add 'Protocol' field to 'portMappings' and 'KnownPortBindings'
// 4) Add 'DockerConfig' struct
// 5) Add 'ImageStates' struct as part of ImageManager
// 6)
// a) Refactor 'Internal' field in 'apicontainer.Container' to 'Type' enum
// b) Add 'ContainerResourcesProvisioned' as a new 'ContainerStatus' enum
// c) Add 'SteadyStateStatus' field to 'Container' struct
// d) Add 'ENIAttachments' struct
// e) Deprecate 'SteadyStateDependencies' in favor of 'TransitionDependencySet'
// 7)
// a) Add 'MetadataUpdated' field to 'apicontainer.Container'
// b) Add 'DomainNameServers' and 'DomainNameSearchList' in `api.ENI`
// 8)
// a) Add 'UseExecutionRole' in `api.ECRAuthData`
// b) Add `executionCredentialsID` in `apitask.Task`
// c) Add 'LogsAuthStrategy' field to 'apicontainer.Container'
// d) Added task cgroup related fields ('CPU', 'Memory', 'MemoryCPULimitsEnabled') to 'apitask.Task'
// 9) Add 'ipToTask' map to state file
// 10) Add 'healthCheckType' field in 'apicontainer.Container'
// 11)
// a) Add 'PrivateDNSName' field to 'api.ENI'
// b) Remove `AppliedStatus` field form 'apicontainer.Container'
// 12) Deprecate 'TransitionDependencySet' and add new 'TransitionDependenciesMap' in 'apicontainer.Container'
// 13) Add 'resources' field to 'api.task.task'
// 14) Add 'PlatformFields' field to 'api.task.task'
// 15) Add 'PIDMode' and 'IPCMode' fields to 'api.task.task'
// 16) Add 'V3EndpointID' field to 'Container' struct
// 17)
// a) Add 'secrets' field to 'apicontainer.Container'
// b) Add 'ssmsecret' field to 'resources'
// 18)
// a) Add 'AvailabilityZone' field to the TaskResponse struct
// b) Add 'asmsecret' field to 'resources'
// 19)
// a) Add 'Associations' field to 'api.task.task'
// b) Add 'GPUIDs' field to 'apicontainer.Container'
// c) Add 'NvidiaRuntime' field to 'api.task.task'
// 20)
// a) Add 'DependsOn' field to 'apicontainer.Container'
// b) Add 'StartTime' field to 'api.container.Container'
// c) Add 'StopTime' field to 'api.container.Container'
// 21) Add 'target' field to the Secret struct
// 22)
// a) Add 'attachmentType' field to 'api.ENIAttachment'
// b) Add 'InterfaceAssociationProtocol' field to 'api.ENI'
// c) Add 'InterfaceVlanProperties' field to 'api.ENI'
// 23)
// a) Add 'RuntimeID' field to 'apicontainer.Container'
// b) Add 'FirelensConfig' field to 'Container' struct
// c) Add 'firelens' field to 'resources'
// 24)
// a) Add 'imageDigest' field to 'apicontainer.Container'
// b) Add 'Region', 'ExecutionCredentialsID', 'ExternalConfigType', 'ExternalConfigValue' and 'NetworkMode' to
// firelens task resource.
// 25) Add `seqNumTaskManifest` int field
// 26) Add 'credentialspec' field to 'resources'
// 27)
// a) Add 'authorizationConfig', 'transitEncryption' and 'transitEncryptionPort' to 'taskresource.volume.EFSVolumeConfig'
// b) Add 'pauseContainerPID' field to 'taskresource.volume.VolumeResource'
// 28) Add 'envfile' field to 'resources'
// 29) Add 'ExecCommandAgentMetadata' field to 'apicontainer.Container'
ECSDataVersion = 29
// ecsDataFile specifies the filename in the ECS_DATADIR
ecsDataFile = "ecs_agent_data.json"
// minSaveInterval specifies how frequently to flush to disk
minSaveInterval = 10 * time.Second
)
// Saveable types should be json serializable and deserializable
// Properly, this should have json.Marshaler/json.Unmarshaler here, but string
// and so on can be marshaled/unmarshaled sanely but don't fit those interfaces.
type Saveable interface{}
// Saver is a type that can be saved
type Saver interface {
Save() error
ForceSave() error
}
// Option functions are functions that may be used as part of constructing a new
// StateManager
type Option func(StateManager)
type saveableState map[string]*Saveable
type intermediateSaveableState map[string]json.RawMessage
// State is a struct of all data that should be saveable/loadable to disk. Each
// element should be json-serializable.
//
// Note, changing this to work with BinaryMarshaler or another more compact
// format would be fine, but everything already needs a json representation
// since that's our wire format and the extra space taken / IO-time is expected
// to be fairly negligible.
type state struct {
Data saveableState
Version int
}
type intermediateState struct {
Data intermediateSaveableState
}
type versionOnlyState struct {
Version int
}
type platformDependencies interface{}
// A StateManager can load and save state from disk.
// Load is not expected to return an error if there is no state to load.
type StateManager interface {
Saver
Load() error
}
type basicStateManager struct {
statePath string // The path to a file in which state can be serialized
state *state // pointers to the data we should save / load into
saveTimesLock sync.Mutex // guards save times
lastSave time.Time //the last time a save completed
nextPlannedSave time.Time //the next time a save is planned
savingLock sync.Mutex // guards marshal, write, move (on Linux), and load (on Windows)
platformDependencies platformDependencies // platform-specific dependencies
}
// NewStateManager constructs a new StateManager which saves data at the
// location specified in cfg and operates under the given options.
// The returned StateManager will not save more often than every 10 seconds and
// will not reliably return errors with Save, but will log them appropriately.
func NewStateManager(cfg *config.Config, options ...Option) (StateManager, error) {
fi, err := os.Stat(cfg.DataDir)
if err != nil {
return nil, err
}
if !fi.IsDir() {
return nil, errors.New("State manager DataDir must exist")
}
state := &state{
Data: make(saveableState),
Version: ECSDataVersion,
}
manager := &basicStateManager{
statePath: cfg.DataDir,
state: state,
}
for _, option := range options {
option(manager)
}
manager.platformDependencies = newPlatformDependencies()
return manager, nil
}
// AddSaveable is an option that adds a given saveable as one that should be saved
// under the given name. The name must be the same across uses of the
// statemanager (e.g. program invocations) for it to be serialized and
// deserialized correctly.
func AddSaveable(name string, saveable Saveable) Option {
return (Option)(func(m StateManager) {
manager, ok := m.(*basicStateManager)
if !ok {
seelog.Critical("Unable to add to state manager; unknown instantiation")
return
}
manager.state.Data[name] = &saveable
})
}
// Save triggers a save to file, though respects a minimum save interval to wait
// between saves.
func (manager *basicStateManager) Save() error {
defer metrics.MetricsEngineGlobal.RecordStateManagerMetric("SAVE")()
manager.saveTimesLock.Lock()
defer manager.saveTimesLock.Unlock()
if time.Since(manager.lastSave) >= minSaveInterval {
// we can just save
err := manager.ForceSave()
manager.lastSave = time.Now()
manager.nextPlannedSave = time.Time{} // re-zero it; assume all pending desires to save are fulfilled
return err
} else if manager.nextPlannedSave.IsZero() {
// No save planned yet, we should plan one.
next := manager.lastSave.Add(minSaveInterval)
manager.nextPlannedSave = next
go func() {
time.Sleep(time.Until(next))
manager.Save()
}()
}
// else nextPlannedSave wasn't Zero so there's a save planned elsewhere that'll
// fulfill this
return nil
}
// ForceSave saves the given State to a file. It is an atomic operation on POSIX
// systems (by Renaming over the target file).
// This function logs errors at will and does not necessarily expect the caller
// to handle the error because there's little a caller can do in general other
// than just keep going.
// In addition, the StateManager internally buffers save requests in order to
// only save at most every STATE_SAVE_INTERVAL.
func (manager *basicStateManager) ForceSave() error {
manager.savingLock.Lock()
defer manager.savingLock.Unlock()
seelog.Info("Saving state!")
s := manager.state
s.Version = ECSDataVersion
data, err := json.Marshal(s)
if err != nil {
seelog.Error("Error saving state; could not marshal data; this is odd", "err", err)
return err
}
return manager.writeFile(data)
}
// Load reads state off the disk from the well-known filepath and loads it into
// the passed State object.
func (manager *basicStateManager) Load() error {
s := manager.state
seelog.Info("Loading state!")
data, err := manager.readFile()
if err != nil {
seelog.Error("Error reading existing state file", "err", err)
return err
}
if data == nil {
return nil
}
// Dry-run to make sure this is a version we can understand
err = manager.dryRun(data)
if err != nil {
return err
}
// Now load it into the actual state. The reason we do this with the
// intermediate state is that we *must* unmarshal directly into the
// "saveable" pointers we were given in AddSaveable; if we unmarshal
// directly into a map with values of pointers, those pointers are lost.
// We *must* unmarshal this way because the existing pointers could have
// semi-initialized data (and are actually expected to)
var intermediate intermediateState
err = json.Unmarshal(data, &intermediate)
if err != nil {
seelog.Debug("Could not unmarshal into intermediate")
return err
}
for key, rawJSON := range intermediate.Data {
actualPointer, ok := manager.state.Data[key]
if !ok {
seelog.Error("Loading state: potentially malformed json key of " + key)
continue
}
err = json.Unmarshal(rawJSON, actualPointer)
if err != nil {
seelog.Debug("Could not unmarshal into actual")
return err
}
}
seelog.Debug("Loaded state!", "state", s)
return nil
}
func (manager *basicStateManager) dryRun(data []byte) error {
// Dry-run to make sure this is a version we can understand
tmps := versionOnlyState{}
err := json.Unmarshal(data, &tmps)
if err != nil {
seelog.Critical("Could not unmarshal existing state; corrupted data?", "err", err, "data", data)
return err
}
if tmps.Version > ECSDataVersion {
strversion := strconv.Itoa(tmps.Version)
return errors.New("Unsupported data format: Version " + strversion + " not " + strconv.Itoa(ECSDataVersion))
}
return nil
}
| 1 | 25,492 | Looks like this is not needed anymore with the boltdb migration. Let's confirm with @fenxiong. | aws-amazon-ecs-agent | go |
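
The state manager in this record guards against data written by a newer agent by decoding only the Version field first, and only then unmarshalling the payload into a map of json.RawMessage so each entry can later be decoded into the pointer registered for its name. The following is a minimal, self-contained Go sketch of that two-pass pattern; the supportedVersion constant, the load helper, and the sample JSON are illustrative stand-ins, not the agent's actual API.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// supportedVersion plays the role of ECSDataVersion; the value is only illustrative.
const supportedVersion = 29

// versionOnly decodes nothing but the Version field, so an unreadable or
// newer payload can be rejected before any real state is touched.
type versionOnly struct {
	Version int
}

// intermediate keeps each saveable as raw JSON keyed by name; every entry can
// later be unmarshalled into whatever pointer was registered under that name,
// which is why the decode happens in two passes.
type intermediate struct {
	Data map[string]json.RawMessage
}

func load(data []byte) (map[string]json.RawMessage, error) {
	var v versionOnly
	if err := json.Unmarshal(data, &v); err != nil {
		return nil, err
	}
	if v.Version > supportedVersion {
		return nil, errors.New("unsupported state file version")
	}
	var im intermediate
	if err := json.Unmarshal(data, &im); err != nil {
		return nil, err
	}
	return im.Data, nil
}

func main() {
	state := []byte(`{"Version": 29, "Data": {"TaskEngine": {"Tasks": []}}}`)
	entries, err := load(state)
	fmt.Println(len(entries), err) // 1 <nil>
}
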
@@ -409,7 +409,7 @@ func TestRollDPoSConsensus(t *testing.T) {
require.NoError(t, err)
require.NoError(t, sf.Start(ctx))
for j := 0; j < numNodes; j++ {
- ws, err := sf.NewWorkingSet()
+ ws, err := sf.NewWorkingSet(false)
require.NoError(t, err)
_, err = accountutil.LoadOrCreateAccount(ws, chainRawAddrs[j], big.NewInt(0))
require.NoError(t, err) | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package rolldpos
import (
"encoding/hex"
"fmt"
"math/big"
"net"
"sync"
"testing"
"time"
"github.com/facebookgo/clock"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
cp "github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/endorsement"
"github.com/iotexproject/iotex-core/p2p/node"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_actpool"
"github.com/iotexproject/iotex-core/test/mock/mock_blockchain"
"github.com/iotexproject/iotex-core/testutil"
)
type addrKeyPair struct {
priKey crypto.PrivateKey
encodedAddr string
}
func TestNewRollDPoS(t *testing.T) {
t.Parallel()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cfg := config.Default
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
t.Run("normal", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetChainManager(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
})
t.Run("mock-clock", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetChainManager(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock.NewMock()).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
_, ok := r.ctx.clock.(*clock.Mock)
assert.True(t, ok)
})
t.Run("root chain API", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetChainManager(mock_blockchain.NewMockBlockchain(ctrl)).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock.NewMock()).
RegisterProtocol(rp).
Build()
assert.NoError(t, err)
assert.NotNil(t, r)
})
t.Run("missing-dep", func(t *testing.T) {
sk := identityset.PrivateKey(0)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(0).String()).
SetPriKey(sk).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
RegisterProtocol(rp).
Build()
assert.Error(t, err)
assert.Nil(t, r)
})
}
func makeBlock(t *testing.T, accountIndex, numOfEndosements int, makeInvalidEndorse bool, height int) *block.Block {
unixTime := 1500000000
blkTime := int64(-1)
if height != 9 {
height = 9
blkTime = int64(-7723372030)
}
timeT := time.Unix(blkTime, 0)
rap := block.RunnableActionsBuilder{}
ra := rap.
SetHeight(uint64(height)).
SetTimeStamp(timeT).
Build(identityset.PrivateKey(accountIndex).PublicKey())
blk, err := block.NewBuilder(ra).
SetVersion(1).
SetReceiptRoot(hash.Hash256b([]byte("hello, world!"))).
SetDeltaStateDigest(hash.Hash256b([]byte("world, hello!"))).
SetPrevBlockHash(hash.Hash256b([]byte("hello, block!"))).
SignAndBuild(identityset.PrivateKey(accountIndex))
require.NoError(t, err)
footerForBlk := &block.Footer{}
typesFooter := iotextypes.BlockFooter{}
for i := 0; i < numOfEndosements; i++ {
timeTime := time.Unix(int64(unixTime), 0)
hs := blk.HashBlock()
var consensusVote *ConsensusVote
if makeInvalidEndorse {
consensusVote = NewConsensusVote(hs[:], LOCK)
} else {
consensusVote = NewConsensusVote(hs[:], COMMIT)
}
en, err := endorsement.Endorse(identityset.PrivateKey(i), consensusVote, timeTime)
require.NoError(t, err)
enProto, err := en.Proto()
require.NoError(t, err)
typesFooter.Endorsements = append(typesFooter.Endorsements, enProto)
}
ts, err := ptypes.TimestampProto(time.Unix(int64(unixTime), 0))
require.NoError(t, err)
typesFooter.Timestamp = ts
require.NotNil(t, typesFooter.Timestamp)
err = footerForBlk.ConvertFromBlockFooterPb(&typesFooter)
require.NoError(t, err)
blk.Footer = *footerForBlk
return &blk
}
func TestValidateBlockFooter(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
candidates := make([]string, 5)
for i := 0; i < len(candidates); i++ {
candidates[i] = identityset.Address(i).String()
}
clock := clock.NewMock()
blockHeight := uint64(8)
footer := &block.Footer{}
blockchain := mock_blockchain.NewMockBlockchain(ctrl)
blockchain.EXPECT().GenesisTimestamp().Return(int64(1500000000)).Times(5)
blockchain.EXPECT().BlockFooterByHeight(blockHeight).Return(footer, nil).Times(5)
blockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return([]*state.Candidate{
{Address: candidates[0]},
{Address: candidates[1]},
{Address: candidates[2]},
{Address: candidates[3]},
{Address: candidates[4]},
}, nil).AnyTimes()
sk1 := identityset.PrivateKey(1)
cfg := config.Default
cfg.Genesis.NumDelegates = 4
cfg.Genesis.NumSubEpochs = 1
cfg.Genesis.BlockInterval = 10 * time.Second
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(1).String()).
SetPriKey(sk1).
SetChainManager(blockchain).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
require.NotNil(t, r)
// all right
blk := makeBlock(t, 1, 4, false, 9)
err = r.ValidateBlockFooter(blk)
require.NoError(t, err)
// Proposer is wrong
blk = makeBlock(t, 0, 4, false, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// Not enough endorsements
blk = makeBlock(t, 1, 2, false, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// round information is wrong
blk = makeBlock(t, 1, 4, false, 0)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
// Some endorsement is invalid
blk = makeBlock(t, 1, 4, true, 9)
err = r.ValidateBlockFooter(blk)
require.Error(t, err)
}
func TestRollDPoS_Metrics(t *testing.T) {
t.Parallel()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
candidates := make([]string, 5)
for i := 0; i < len(candidates); i++ {
candidates[i] = identityset.Address(i).String()
}
clock := clock.NewMock()
blockHeight := uint64(8)
footer := &block.Footer{}
blockchain := mock_blockchain.NewMockBlockchain(ctrl)
blockchain.EXPECT().TipHeight().Return(blockHeight).Times(1)
blockchain.EXPECT().GenesisTimestamp().Return(int64(1500000000)).Times(2)
blockchain.EXPECT().BlockFooterByHeight(blockHeight).Return(footer, nil).Times(2)
blockchain.EXPECT().CandidatesByHeight(gomock.Any()).Return([]*state.Candidate{
{Address: candidates[0]},
{Address: candidates[1]},
{Address: candidates[2]},
{Address: candidates[3]},
{Address: candidates[4]},
}, nil).AnyTimes()
sk1 := identityset.PrivateKey(1)
cfg := config.Default
cfg.Genesis.NumDelegates = 4
cfg.Genesis.NumSubEpochs = 1
cfg.Genesis.BlockInterval = 10 * time.Second
rp := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
r, err := NewRollDPoSBuilder().
SetConfig(cfg).
SetAddr(identityset.Address(1).String()).
SetPriKey(sk1).
SetChainManager(blockchain).
SetActPool(mock_actpool.NewMockActPool(ctrl)).
SetBroadcast(func(_ proto.Message) error {
return nil
}).
SetClock(clock).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
require.NotNil(t, r)
clock.Add(r.ctx.BlockInterval(blockHeight))
require.NoError(t, r.ctx.Start(context.Background()))
r.ctx.round, err = r.ctx.RoundCalc().UpdateRound(r.ctx.round, blockHeight+1, r.ctx.BlockInterval(blockHeight+1), clock.Now(), 2*time.Second)
require.NoError(t, err)
m, err := r.Metrics()
require.NoError(t, err)
assert.Equal(t, uint64(3), m.LatestEpoch)
cp.SortCandidates(candidates, rp.GetEpochHeight(m.LatestEpoch), cp.CryptoSeed)
assert.Equal(t, candidates[:4], m.LatestDelegates)
assert.Equal(t, candidates[1], m.LatestBlockProducer)
}
// E2E RollDPoS tests below
type directOverlay struct {
addr net.Addr
peers map[net.Addr]*RollDPoS
}
func (o *directOverlay) Start(_ context.Context) error { return nil }
func (o *directOverlay) Stop(_ context.Context) error { return nil }
func (o *directOverlay) Broadcast(msg proto.Message) error {
// Only broadcast consensus message
if cMsg, ok := msg.(*iotextypes.ConsensusMessage); ok {
for _, r := range o.peers {
if err := r.HandleConsensusMsg(cMsg); err != nil {
return errors.Wrap(err, "error when handling consensus message directly")
}
}
}
return nil
}
func (o *directOverlay) Tell(uint32, net.Addr, proto.Message) error { return nil }
func (o *directOverlay) Self() net.Addr { return o.addr }
func (o *directOverlay) GetPeers() []net.Addr {
addrs := make([]net.Addr, 0, len(o.peers))
for addr := range o.peers {
addrs = append(addrs, addr)
}
return addrs
}
func TestRollDPoSConsensus(t *testing.T) {
newConsensusComponents := func(numNodes int) ([]*RollDPoS, []*directOverlay, []blockchain.Blockchain) {
cfg := config.Default
cfg.Consensus.RollDPoS.ConsensusDBPath = ""
cfg.Consensus.RollDPoS.Delay = 300 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptBlockTTL = 800 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptProposalEndorsementTTL = 400 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.AcceptLockEndorsementTTL = 400 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.CommitTTL = 400 * time.Millisecond
cfg.Consensus.RollDPoS.FSM.UnmatchedEventTTL = time.Second
cfg.Consensus.RollDPoS.FSM.UnmatchedEventInterval = 10 * time.Millisecond
cfg.Consensus.RollDPoS.ToleratedOvertime = 200 * time.Millisecond
cfg.Genesis.BlockInterval = 2 * time.Second
cfg.Genesis.Blockchain.NumDelegates = uint64(numNodes)
cfg.Genesis.Blockchain.NumSubEpochs = 1
cfg.Genesis.EnableGravityChainVoting = false
chainAddrs := make([]*addrKeyPair, 0, numNodes)
networkAddrs := make([]net.Addr, 0, numNodes)
for i := 0; i < numNodes; i++ {
sk := identityset.PrivateKey(i)
addr := addrKeyPair{
encodedAddr: identityset.Address(i).String(),
priKey: sk,
}
chainAddrs = append(chainAddrs, &addr)
networkAddrs = append(networkAddrs, node.NewTCPNode(fmt.Sprintf("127.0.0.%d:4689", i+1)))
}
chainRawAddrs := make([]string, 0, numNodes)
addressMap := make(map[string]*addrKeyPair)
for _, addr := range chainAddrs {
chainRawAddrs = append(chainRawAddrs, addr.encodedAddr)
addressMap[addr.encodedAddr] = addr
}
cp.SortCandidates(chainRawAddrs, 1, cp.CryptoSeed)
for i, rawAddress := range chainRawAddrs {
chainAddrs[i] = addressMap[rawAddress]
}
candidatesByHeightFunc := func(_ uint64) ([]*state.Candidate, error) {
candidates := make([]*state.Candidate, 0, numNodes)
for _, addr := range chainAddrs {
candidates = append(candidates, &state.Candidate{Address: addr.encodedAddr})
}
return candidates, nil
}
chains := make([]blockchain.Blockchain, 0, numNodes)
p2ps := make([]*directOverlay, 0, numNodes)
cs := make([]*RollDPoS, 0, numNodes)
for i := 0; i < numNodes; i++ {
ctx := context.Background()
cfg.Chain.ProducerPrivKey = hex.EncodeToString(chainAddrs[i].priKey.Bytes())
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
require.NoError(t, err)
require.NoError(t, sf.Start(ctx))
for j := 0; j < numNodes; j++ {
ws, err := sf.NewWorkingSet()
require.NoError(t, err)
_, err = accountutil.LoadOrCreateAccount(ws, chainRawAddrs[j], big.NewInt(0))
require.NoError(t, err)
gasLimit := testutil.TestGasLimit
wsctx := protocol.WithRunActionsCtx(ctx,
protocol.RunActionsCtx{
Producer: identityset.Address(27),
GasLimit: gasLimit,
})
_, err = ws.RunActions(wsctx, 0, nil)
require.NoError(t, err)
require.NoError(t, sf.Commit(ws))
}
registry := protocol.Registry{}
hu := config.NewHeightUpgrade(cfg)
acc := account.NewProtocol(hu)
require.NoError(t, registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(t, registry.Register(rolldpos.ProtocolID, rp))
chain := blockchain.NewBlockchain(
cfg,
nil,
blockchain.InMemDaoOption(),
blockchain.PrecreatedStateFactoryOption(sf),
blockchain.RegistryOption(®istry),
)
chain.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(chain))
chain.Validator().AddActionValidators(account.NewProtocol(hu))
chains = append(chains, chain)
actPool, err := actpool.NewActPool(chain, cfg.ActPool, actpool.EnableExperimentalActions())
require.NoError(t, err)
p2p := &directOverlay{
addr: networkAddrs[i],
peers: make(map[net.Addr]*RollDPoS),
}
p2ps = append(p2ps, p2p)
consensus, err := NewRollDPoSBuilder().
SetAddr(chainAddrs[i].encodedAddr).
SetPriKey(chainAddrs[i].priKey).
SetConfig(cfg).
SetChainManager(chain).
SetActPool(actPool).
SetBroadcast(p2p.Broadcast).
SetCandidatesByHeightFunc(candidatesByHeightFunc).
RegisterProtocol(rp).
Build()
require.NoError(t, err)
cs = append(cs, consensus)
}
for i := 0; i < numNodes; i++ {
for j := 0; j < numNodes; j++ {
if i != j {
p2ps[i].peers[p2ps[j].addr] = cs[j]
}
}
}
return cs, p2ps, chains
}
t.Run("1-block", func(t *testing.T) {
// TODO: fix and enable the test
t.Skip()
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 10*time.Second, func() (bool, error) {
for _, chain := range chains {
if chain.TipHeight() < 1 {
return false, nil
}
}
return true, nil
}))
})
t.Run("1-epoch", func(t *testing.T) {
if testing.Short() {
t.Skip("Skip the 1-epoch test in short mode.")
}
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 100*time.Second, func() (bool, error) {
for _, chain := range chains {
if chain.TipHeight() < 48 {
return false, nil
}
}
return true, nil
}))
})
t.Run("network-partition-time-rotation", func(t *testing.T) {
// TODO: fix and enable the test
t.Skip()
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 1 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[1].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
cs[idx].ctx.roundCalc.timeBasedRotation = true
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 60*time.Second, func() (bool, error) {
for i, chain := range chains {
if i == 1 {
continue
}
if chain.TipHeight() < 4 {
return false, nil
}
}
return true, nil
}))
})
t.Run("proposer-network-partition-blocking", func(t *testing.T) {
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 1 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[1].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
time.Sleep(5 * time.Second)
for _, chain := range chains {
header, err := chain.BlockHeaderByHeight(1)
assert.Nil(t, header)
assert.Error(t, err)
}
})
t.Run("non-proposer-network-partition-blocking", func(t *testing.T) {
ctx := context.Background()
cs, p2ps, chains := newConsensusComponents(24)
// 1 should be the block 1's proposer
for i, p2p := range p2ps {
if i == 0 {
p2p.peers = make(map[net.Addr]*RollDPoS)
} else {
delete(p2p.peers, p2ps[0].addr)
}
}
for i := 0; i < 24; i++ {
require.NoError(t, chains[i].Start(ctx))
require.NoError(t, p2ps[i].Start(ctx))
}
wg := sync.WaitGroup{}
wg.Add(24)
for i := 0; i < 24; i++ {
go func(idx int) {
defer wg.Done()
err := cs[idx].Start(ctx)
require.NoError(t, err)
}(i)
}
wg.Wait()
defer func() {
for i := 0; i < 24; i++ {
require.NoError(t, cs[i].Stop(ctx))
require.NoError(t, p2ps[i].Stop(ctx))
require.NoError(t, chains[i].Stop(ctx))
}
}()
assert.NoError(t, testutil.WaitUntil(200*time.Millisecond, 60*time.Second, func() (bool, error) {
for i, chain := range chains {
if i == 0 {
continue
}
if chain.TipHeight() < 2 {
return false, nil
}
}
return true, nil
}))
for i, chain := range chains {
header, err := chain.BlockHeaderByHeight(1)
if i == 0 {
assert.Nil(t, header)
assert.Error(t, err)
} else {
assert.NotNil(t, header)
assert.NoError(t, err)
}
}
})
}
| 1 | 19,583 | shadow: declaration of "err" shadows declaration at line 408 (from `govet`) | iotexproject-iotex-core | go |
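
The review comment above is the standard govet "shadow" finding: inside the loop, ws, err := sf.NewWorkingSet(false) uses ":=" and therefore declares a fresh err that hides the err from the enclosing scope. The sketch below shows the failure mode and the usual fix in isolation; fetch and its error are made up purely for illustration.

package main

import (
	"errors"
	"fmt"
)

// fetch stands in for any call that returns a value and an error.
func fetch(fail bool) (string, error) {
	if fail {
		return "", errors.New("boom")
	}
	return "ok", nil
}

func main() {
	val, err := fetch(false)

	// Shadowed: ":=" declares a brand new err scoped to this block, so the
	// outer err is never updated. This is exactly what govet's shadow check reports.
	if val == "ok" {
		val, err := fetch(true)
		_, _ = val, err
	}
	fmt.Println("after shadowed call:", err) // after shadowed call: <nil>

	// Fixed: plain "=" reuses the variables from the enclosing scope, so the
	// error actually propagates.
	if val == "ok" {
		val, err = fetch(true)
	}
	fmt.Println("after plain assignment:", err) // after plain assignment: boom
}
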
@@ -380,7 +380,7 @@ public class Actions {
// Of course, this is the offset from the centre of the element. We have no idea what the width
// and height are once we execute this method.
- LOG.info("When using the W3C Action commands, offsets are from the center of element");
+ LOG.finest("When using the W3C Action commands, offsets are from the center of element");
return moveInTicks(target, xOffset, yOffset);
}
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.interactions;
import static org.openqa.selenium.interactions.PointerInput.Kind.MOUSE;
import static org.openqa.selenium.interactions.PointerInput.MouseButton.LEFT;
import static org.openqa.selenium.interactions.PointerInput.MouseButton.RIGHT;
import org.openqa.selenium.Keys;
import org.openqa.selenium.UnsupportedCommandException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.interactions.PointerInput.Origin;
import org.openqa.selenium.interactions.internal.MouseAction.Button;
import java.time.Duration;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.IntConsumer;
import java.util.logging.Logger;
/**
* The user-facing API for emulating complex user gestures. Use this class rather than using the
* Keyboard or Mouse directly.
* <p>
* Implements the builder pattern: Builds a CompositeAction containing all actions specified by the
* method calls.
*/
public class Actions {
private final static Logger LOG = Logger.getLogger(Actions.class.getName());
private final WebDriver driver;
// W3C
private final Map<InputSource, Sequence> sequences = new HashMap<>();
private final PointerInput defaultMouse = new PointerInput(MOUSE, "default mouse");
private final KeyInput defaultKeyboard = new KeyInput("default keyboard");
// JSON-wire protocol
private final Keyboard jsonKeyboard;
private final Mouse jsonMouse;
protected CompositeAction action = new CompositeAction();
public Actions(WebDriver driver) {
this.driver = Objects.requireNonNull(driver);
if (driver instanceof HasInputDevices) {
HasInputDevices deviceOwner = (HasInputDevices) driver;
this.jsonKeyboard = deviceOwner.getKeyboard();
this.jsonMouse = deviceOwner.getMouse();
} else {
this.jsonKeyboard = null;
this.jsonMouse = null;
}
}
/**
* Performs a modifier key press. Does not release the modifier key - subsequent interactions
* may assume it's kept pressed.
* Note that the modifier key is <b>never</b> released implicitly - either
* <i>keyUp(theKey)</i> or <i>sendKeys(Keys.NULL)</i>
* must be called to release the modifier.
* @param key Either {@link Keys#SHIFT}, {@link Keys#ALT} or {@link Keys#CONTROL}. If the
* provided key is none of those, {@link IllegalArgumentException} is thrown.
* @return A self reference.
*/
public Actions keyDown(CharSequence key) {
if (isBuildingActions()) {
action.addAction(new KeyDownAction(jsonKeyboard, jsonMouse, asKeys(key)));
}
return addKeyAction(key, codePoint -> tick(defaultKeyboard.createKeyDown(codePoint)));
}
/**
* Performs a modifier key press after focusing on an element. Equivalent to:
* <i>Actions.click(element).sendKeys(theKey);</i>
* @see #keyDown(CharSequence)
*
* @param key Either {@link Keys#SHIFT}, {@link Keys#ALT} or {@link Keys#CONTROL}. If the
* provided key is none of those, {@link IllegalArgumentException} is thrown.
* @param target WebElement to perform the action
* @return A self reference.
*/
public Actions keyDown(WebElement target, CharSequence key) {
if (isBuildingActions()) {
action.addAction(new KeyDownAction(jsonKeyboard, jsonMouse, (Locatable) target, asKeys(key)));
}
return focusInTicks(target)
.addKeyAction(key, codepoint -> tick(defaultKeyboard.createKeyDown(codepoint)));
}
/**
* Performs a modifier key release. Releasing a non-depressed modifier key will yield undefined
* behaviour.
*
* @param key Either {@link Keys#SHIFT}, {@link Keys#ALT} or {@link Keys#CONTROL}.
* @return A self reference.
*/
public Actions keyUp(CharSequence key) {
if (isBuildingActions()) {
action.addAction(new KeyUpAction(jsonKeyboard, jsonMouse, asKeys(key)));
}
return addKeyAction(key, codePoint -> tick(defaultKeyboard.createKeyUp(codePoint)));
}
/**
* Performs a modifier key release after focusing on an element. Equivalent to:
* <i>Actions.click(element).sendKeys(theKey);</i>
* @see #keyUp(CharSequence) on behaviour regarding non-depressed modifier keys.
*
* @param key Either {@link Keys#SHIFT}, {@link Keys#ALT} or {@link Keys#CONTROL}.
* @param target WebElement to perform the action on
* @return A self reference.
*/
public Actions keyUp(WebElement target, CharSequence key) {
if (isBuildingActions()) {
action.addAction(new KeyUpAction(jsonKeyboard, jsonMouse, (Locatable) target, asKeys(key)));
}
return focusInTicks(target)
.addKeyAction(key, codePoint -> tick(defaultKeyboard.createKeyUp(codePoint)));
}
/**
* Sends keys to the active element. This differs from calling
* {@link WebElement#sendKeys(CharSequence...)} on the active element in two ways:
* <ul>
* <li>The modifier keys included in this call are not released.</li>
* <li>There is no attempt to re-focus the element - so sendKeys(Keys.TAB) for switching
* elements should work. </li>
* </ul>
*
* @see WebElement#sendKeys(CharSequence...)
*
* @param keys The keys.
* @return A self reference.
*
* @throws IllegalArgumentException if keys is null
*/
public Actions sendKeys(CharSequence... keys) {
if (isBuildingActions()) {
action.addAction(new SendKeysAction(jsonKeyboard, jsonMouse, null, keys));
}
return sendKeysInTicks(keys);
}
/**
* Equivalent to calling:
* <i>Actions.click(element).sendKeys(keysToSend).</i>
* This method is different from {@link WebElement#sendKeys(CharSequence...)} - see
* {@link #sendKeys(CharSequence...)} for details how.
*
* @see #sendKeys(java.lang.CharSequence[])
*
* @param target element to focus on.
* @param keys The keys.
* @return A self reference.
*
* @throws IllegalArgumentException if keys is null
*/
public Actions sendKeys(WebElement target, CharSequence... keys) {
if (isBuildingActions()) {
action.addAction(new SendKeysAction(jsonKeyboard, jsonMouse, (Locatable) target, keys));
}
return focusInTicks(target).sendKeysInTicks(keys);
}
private Keys asKeys(CharSequence key) {
if (!(key instanceof Keys)) {
throw new IllegalArgumentException(
"keyDown argument must be an instanceof Keys: " + key);
}
return (Keys) key;
}
private Actions sendKeysInTicks(CharSequence... keys) {
if (keys == null) {
throw new IllegalArgumentException("Keys should be a not null CharSequence");
}
for (CharSequence key : keys) {
key.codePoints().forEach(codePoint -> {
tick(defaultKeyboard.createKeyDown(codePoint));
tick(defaultKeyboard.createKeyUp(codePoint));
});
}
return this;
}
private Actions addKeyAction(CharSequence key, IntConsumer consumer) {
// Verify that we only have a single character to type.
if (key.codePoints().count() != 1) {
throw new IllegalStateException(String.format(
"Only one code point is allowed at a time: %s", key));
}
key.codePoints().forEach(consumer);
return this;
}
/**
* Clicks (without releasing) in the middle of the given element. This is equivalent to:
* <i>Actions.moveToElement(onElement).clickAndHold()</i>
*
* @param target Element to move to and click.
* @return A self reference.
*/
public Actions clickAndHold(WebElement target) {
if (isBuildingActions()) {
action.addAction(new ClickAndHoldAction(jsonMouse, (Locatable) target));
}
return moveInTicks(target, 0, 0)
.tick(defaultMouse.createPointerDown(LEFT.asArg()));
}
/**
* Clicks (without releasing) at the current mouse location.
* @return A self reference.
*/
public Actions clickAndHold() {
if (isBuildingActions()) {
action.addAction(new ClickAndHoldAction(jsonMouse, null));
}
return tick(defaultMouse.createPointerDown(LEFT.asArg()));
}
/**
* Releases the depressed left mouse button, in the middle of the given element.
* This is equivalent to:
* <i>Actions.moveToElement(onElement).release()</i>
*
* Invoking this action without invoking {@link #clickAndHold()} first will result in
* undefined behaviour.
*
* @param target Element to release the mouse button above.
* @return A self reference.
*/
public Actions release(WebElement target) {
if (isBuildingActions()) {
action.addAction(new ButtonReleaseAction(jsonMouse, (Locatable) target));
}
return moveInTicks(target, 0, 0).tick(defaultMouse.createPointerUp(LEFT.asArg()));
}
/**
* Releases the depressed left mouse button at the current mouse location.
* @see #release(org.openqa.selenium.WebElement)
* @return A self reference.
*/
public Actions release() {
if (isBuildingActions()) {
action.addAction(new ButtonReleaseAction(jsonMouse, null));
}
return tick(defaultMouse.createPointerUp(Button.LEFT.asArg()));
}
/**
* Clicks in the middle of the given element. Equivalent to:
* <i>Actions.moveToElement(onElement).click()</i>
*
* @param target Element to click.
* @return A self reference.
*/
public Actions click(WebElement target) {
if (isBuildingActions()) {
action.addAction(new ClickAction(jsonMouse, (Locatable) target));
}
return moveInTicks(target, 0, 0).clickInTicks(LEFT);
}
/**
* Clicks at the current mouse location. Useful when combined with
* {@link #moveToElement(org.openqa.selenium.WebElement, int, int)} or
* {@link #moveByOffset(int, int)}.
* @return A self reference.
*/
public Actions click() {
if (isBuildingActions()) {
action.addAction(new ClickAction(jsonMouse, null));
}
return clickInTicks(LEFT);
}
private Actions clickInTicks(PointerInput.MouseButton button) {
tick(defaultMouse.createPointerDown(button.asArg()));
tick(defaultMouse.createPointerUp(button.asArg()));
return this;
}
private Actions focusInTicks(WebElement target) {
return moveInTicks(target, 0, 0).clickInTicks(LEFT);
}
/**
* Performs a double-click at middle of the given element. Equivalent to:
* <i>Actions.moveToElement(element).doubleClick()</i>
*
* @param target Element to move to.
* @return A self reference.
*/
public Actions doubleClick(WebElement target) {
if (isBuildingActions()) {
action.addAction(new DoubleClickAction(jsonMouse, (Locatable) target));
}
return moveInTicks(target, 0, 0)
.clickInTicks(LEFT)
.clickInTicks(LEFT);
}
/**
* Performs a double-click at the current mouse location.
* @return A self reference.
*/
public Actions doubleClick() {
if (isBuildingActions()) {
action.addAction(new DoubleClickAction(jsonMouse, null));
}
return clickInTicks(LEFT).clickInTicks(LEFT);
}
/**
* Moves the mouse to the middle of the element. The element is scrolled into view and its
* location is calculated using getBoundingClientRect.
* @param target element to move to.
* @return A self reference.
*/
public Actions moveToElement(WebElement target) {
if (isBuildingActions()) {
action.addAction(new MoveMouseAction(jsonMouse, (Locatable) target));
}
return moveInTicks(target, 0, 0);
}
/**
* Moves the mouse to an offset from the top-left corner of the element.
* The element is scrolled into view and its location is calculated using getBoundingClientRect.
* @param target element to move to.
* @param xOffset Offset from the top-left corner. A negative value means coordinates left from
* the element.
* @param yOffset Offset from the top-left corner. A negative value means coordinates above
* the element.
* @return A self reference.
*/
public Actions moveToElement(WebElement target, int xOffset, int yOffset) {
if (isBuildingActions()) {
action.addAction(new MoveToOffsetAction(jsonMouse, (Locatable) target, xOffset, yOffset));
}
// Of course, this is the offset from the centre of the element. We have no idea what the width
// and height are once we execute this method.
LOG.info("When using the W3C Action commands, offsets are from the center of element");
return moveInTicks(target, xOffset, yOffset);
}
private Actions moveInTicks(WebElement target, int xOffset, int yOffset) {
return tick(defaultMouse.createPointerMove(
Duration.ofMillis(100),
Origin.fromElement(target),
xOffset,
yOffset));
}
/**
* Moves the mouse from its current position (or 0,0) by the given offset. If the coordinates
* provided are outside the viewport (the mouse will end up outside the browser window) then
* the viewport is scrolled to match.
* @param xOffset horizontal offset. A negative value means moving the mouse left.
* @param yOffset vertical offset. A negative value means moving the mouse up.
* @return A self reference.
* @throws MoveTargetOutOfBoundsException if the provided offset is outside the document's
* boundaries.
*/
public Actions moveByOffset(int xOffset, int yOffset) {
if (isBuildingActions()) {
action.addAction(new MoveToOffsetAction(jsonMouse, null, xOffset, yOffset));
}
return tick(
defaultMouse.createPointerMove(Duration.ofMillis(200), Origin.pointer(), xOffset, yOffset));
}
/**
* Performs a context-click at middle of the given element. First performs a mouseMove
* to the location of the element.
*
* @param target Element to move to.
* @return A self reference.
*/
public Actions contextClick(WebElement target) {
if (isBuildingActions()) {
action.addAction(new ContextClickAction(jsonMouse, (Locatable) target));
}
return moveInTicks(target, 0, 0).clickInTicks(RIGHT);
}
/**
* Performs a context-click at the current mouse location.
* @return A self reference.
*/
public Actions contextClick() {
if (isBuildingActions()) {
action.addAction(new ContextClickAction(jsonMouse, null));
}
return clickInTicks(RIGHT);
}
/**
* A convenience method that performs click-and-hold at the location of the source element,
* moves to the location of the target element, then releases the mouse.
*
* @param source element to emulate button down at.
* @param target element to move to and release the mouse at.
* @return A self reference.
*/
public Actions dragAndDrop(WebElement source, WebElement target) {
if (isBuildingActions()) {
action.addAction(new ClickAndHoldAction(jsonMouse, (Locatable) source));
action.addAction(new MoveMouseAction(jsonMouse, (Locatable) target));
action.addAction(new ButtonReleaseAction(jsonMouse, (Locatable) target));
}
return moveInTicks(source, 0, 0)
.tick(defaultMouse.createPointerDown(LEFT.asArg()))
.moveInTicks(target, 0, 0)
.tick(defaultMouse.createPointerUp(LEFT.asArg()));
}
/**
* A convenience method that performs click-and-hold at the location of the source element,
* moves by a given offset, then releases the mouse.
*
* @param source element to emulate button down at.
* @param xOffset horizontal move offset.
* @param yOffset vertical move offset.
* @return A self reference.
*/
public Actions dragAndDropBy(WebElement source, int xOffset, int yOffset) {
if (isBuildingActions()) {
action.addAction(new ClickAndHoldAction(jsonMouse, (Locatable) source));
action.addAction(new MoveToOffsetAction(jsonMouse, null, xOffset, yOffset));
action.addAction(new ButtonReleaseAction(jsonMouse, null));
}
return moveInTicks(source, 0, 0)
.tick(defaultMouse.createPointerDown(LEFT.asArg()))
.tick(defaultMouse.createPointerMove(Duration.ofMillis(250), Origin.pointer(), xOffset, yOffset))
.tick(defaultMouse.createPointerUp(LEFT.asArg()));
}
/**
* Performs a pause.
*
* @param pause pause duration, in milliseconds.
* @return A self reference.
*/
public Actions pause(long pause) {
if (isBuildingActions()) {
action.addAction(new PauseAction(pause));
}
return tick(new Pause(defaultMouse, Duration.ofMillis(pause)));
}
public Actions pause(Duration duration) {
Objects.requireNonNull(duration, "Duration of pause not set");
if (isBuildingActions()) {
action.addAction(new PauseAction(duration.toMillis()));
}
return tick(new Pause(defaultMouse, duration));
}
public Actions tick(Interaction... actions) {
// All actions must be for a unique source.
Set<InputSource> seenSources = new HashSet<>();
for (Interaction action : actions) {
boolean freshlyAdded = seenSources.add(action.getSource());
if (!freshlyAdded) {
throw new IllegalStateException(String.format(
"You may only add one action per input source per tick: %s",
Arrays.asList(actions)));
}
}
// Add all actions to sequences
for (Interaction action : actions) {
Sequence sequence = getSequence(action.getSource());
sequence.addAction(action);
}
// And now pad the remaining sequences with a pause.
Set<InputSource> unseen = new HashSet<>(sequences.keySet());
unseen.removeAll(seenSources);
for (InputSource source : unseen) {
getSequence(source).addAction(new Pause(source, Duration.ZERO));
}
return this;
}
public Actions tick(Action action) {
if (!(action instanceof IsInteraction)) {
throw new IllegalStateException("Expected action to implement IsInteraction");
}
for (Interaction interaction :
((IsInteraction) action).asInteractions(defaultMouse, defaultKeyboard)) {
tick(interaction);
}
if (isBuildingActions()) {
this.action.addAction(action);
}
return this;
}
/**
* Generates a composite action containing all actions so far, ready to be performed (and
* resets the internal builder state, so subsequent calls to {@link #build()} will contain fresh
* sequences).
*
* @return the composite action
*/
public Action build() {
Action toReturn = new BuiltAction(driver, new LinkedHashMap<>(sequences), action);
action = new CompositeAction();
sequences.clear();
return toReturn;
}
/**
* A convenience method for performing the actions without calling build() first.
*/
public void perform() {
build().perform();
}
private Sequence getSequence(InputSource source) {
Sequence sequence = sequences.get(source);
if (sequence != null) {
return sequence;
}
int longest = 0;
for (Sequence examining : sequences.values()) {
longest = Math.max(longest, examining.size());
}
sequence = new Sequence(source, longest);
sequences.put(source, sequence);
return sequence;
}
private boolean isBuildingActions() {
return jsonMouse != null || jsonKeyboard != null;
}
private static class BuiltAction implements Action {
private final WebDriver driver;
private final Map<InputSource, Sequence> sequences;
private final Action fallBack;
private BuiltAction(WebDriver driver, Map<InputSource, Sequence> sequences, Action fallBack) {
this.driver = driver;
this.sequences = sequences;
this.fallBack = fallBack;
}
@Override
public void perform() {
if (driver == null) {
// One of the deprecated constructors was used. Fall back to the old way for now.
fallBack.perform();
return;
}
try {
((Interactive) driver).perform(sequences.values());
} catch (ClassCastException | UnsupportedCommandException e) {
// Fall back to the old way of doing things. Old Skool #ftw
fallBack.perform();
}
}
}
}
| 1 | 16,442 | Again, the existing level is correct --- we're letting users know about something that might cause their tests to fail. | SeleniumHQ-selenium | rb |
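
The exchange above is about which logging level a user-facing notice should use: demoting it from info to finest means almost nobody sees it under a default configuration. Here is the same trade-off sketched in Go with logrus (an illustration only; the record itself uses java.util.logging), showing that messages below the configured level are simply dropped.

package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// logrus defaults to InfoLevel, so Info reaches the user while Debug is
	// silently dropped: demoting a user-facing notice hides it from anyone
	// running at default verbosity.
	log.Info("offsets are measured from the center of the element")  // printed
	log.Debug("offsets are measured from the center of the element") // dropped

	// The quieter message only shows up once the user opts in to a more
	// verbose level.
	log.SetLevel(log.DebugLevel)
	log.Debug("offsets are measured from the center of the element") // printed
}
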
@@ -34,7 +34,7 @@ type ProfileDecoder struct {
}
func NewProfileDecoder(callbacks passthruCallbacks) *ProfileDecoder {
- return &ProfileDecoder{callbacks: callbacks, converter: conversion.Converter{}}
+ return &ProfileDecoder{callbacks: callbacks, converter: conversion.NewConverter()}
}
func (p *ProfileDecoder) RegisterWith(d *dispatcher.Dispatcher) { | 1 | // Copyright (c) 2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package calc
import (
"strings"
"github.com/projectcalico/felix/dispatcher"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/libcalico-go/lib/backend/api"
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
"github.com/projectcalico/libcalico-go/lib/backend/model"
log "github.com/sirupsen/logrus"
)
// ProfileDecoder takes updates from a dispatcher, determines if the profile is a Kubernetes Service Account or
// Kubernetes Namespace, and if it is, generates a dataplane update or remove for it.
type ProfileDecoder struct {
callbacks passthruCallbacks
converter conversion.Converter
}
func NewProfileDecoder(callbacks passthruCallbacks) *ProfileDecoder {
return &ProfileDecoder{callbacks: callbacks, converter: conversion.Converter{}}
}
func (p *ProfileDecoder) RegisterWith(d *dispatcher.Dispatcher) {
d.Register(model.ProfileLabelsKey{}, p.OnUpdate)
}
func (p *ProfileDecoder) OnUpdate(update api.Update) (filterOut bool) {
// This type assertion is safe because we only registered for ProfileLabels updates.
key := update.Key.(model.ProfileLabelsKey)
log.WithField("key", key.String()).Debug("Decoding ProfileLabels")
idInterface := p.classifyProfile(key)
switch id := idInterface.(type) {
case nil:
log.WithField("key", key.String()).Debug("Ignoring ProfileLabels")
case proto.ServiceAccountID:
if update.Value == nil {
p.callbacks.OnServiceAccountRemove(id)
} else {
labels := update.Value.(map[string]string)
msg := proto.ServiceAccountUpdate{
Id: &id, Labels: decodeLabels(conversion.ServiceAccountLabelPrefix, labels)}
p.callbacks.OnServiceAccountUpdate(&msg)
}
case proto.NamespaceID:
if update.Value == nil {
p.callbacks.OnNamespaceRemove(id)
} else {
labels := update.Value.(map[string]string)
msg := proto.NamespaceUpdate{
Id: &id, Labels: decodeLabels(conversion.NamespaceLabelPrefix, labels)}
p.callbacks.OnNamespaceUpdate(&msg)
}
}
return false
}
func (p *ProfileDecoder) classifyProfile(key model.ProfileLabelsKey) interface{} {
namespace, name, err := p.converter.ProfileNameToServiceAccount(key.Name)
if err == nil {
return proto.ServiceAccountID{Name: name, Namespace: namespace}
}
name, err = p.converter.ProfileNameToNamespace(key.Name)
if err == nil {
return proto.NamespaceID{Name: name}
}
return nil
}
// decodeLabels strips the special prefix we add to Profile labels when converting. This gives us the original labels on
// the ServiceAccount or Namespace object.
func decodeLabels(prefix string, in map[string]string) map[string]string {
out := make(map[string]string)
for k, v := range in {
k = strings.TrimPrefix(k, prefix)
out[k] = v
}
return out
}
| 1 | 17,619 | Required by the libcalico-go changes | projectcalico-felix | c |
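
The one-line change in this record follows a libcalico-go refactor in which the conversion type is no longer instantiated directly as conversion.Converter{} but obtained from a constructor. The Go sketch below illustrates that pattern under stated assumptions: the Converter interface, the unexported implementation, the "kns." prefix handling, and NewConverter here are simplified stand-ins, not the real libcalico-go API.

package main

import (
	"fmt"
	"strings"
)

// Converter is a hypothetical stand-in for the conversion type after it
// stopped being a plain, directly constructible struct.
type Converter interface {
	ProfileNameToNamespace(profile string) (string, error)
}

// converter is unexported, so callers can no longer write Converter{} and
// must go through NewConverter instead.
type converter struct{}

func (converter) ProfileNameToNamespace(profile string) (string, error) {
	// Simplified: namespace profiles are assumed to carry a "kns." prefix.
	if strings.HasPrefix(profile, "kns.") {
		return strings.TrimPrefix(profile, "kns."), nil
	}
	return "", fmt.Errorf("%s is not a namespace profile", profile)
}

// NewConverter is the only way to obtain a Converter, which is why the
// ProfileDecoder constructor in the patch switches away from a struct literal.
func NewConverter() Converter {
	return converter{}
}

type ProfileDecoder struct {
	converter Converter
}

func main() {
	d := ProfileDecoder{converter: NewConverter()}
	fmt.Println(d.converter.ProfileNameToNamespace("kns.kube-system")) // kube-system <nil>
}
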
@@ -438,10 +438,12 @@ void write_variables_gnuplot(struct histogram *h, struct histogram *all)
FILE *f = open_file(fname);
free(fname);
+ fprintf(f, "%s = %" PRId64"\n", "current_buckets", h->nbuckets);
fprintf(f, "%s = %lf\n", "current_minimum", h->min_value);
fprintf(f, "%s = %lf\n", "current_maximum", h->max_value);
fprintf(f, "%s = %lf\n", "current_mode", h->value_at_max_count);
fprintf(f, "%s = %" PRId64"\n", "current_mode_count", h->max_count);
+ fprintf(f, "%s = %" PRId64"\n", "current_mode_min_count", h->min_count);
fprintf(f, "%s = %lf\n", "current_mean", h->mean);
fprintf(f, "%s = %lf\n", "current_percentile75", value_of_p(h, 0.75));
fprintf(f, "%s = %lf\n", "current_percentile25", value_of_p(h, 0.25)); | 1 | /*
Copyright (C) 2015- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
*/
#include <float.h>
#include <omp.h>
#include "rmon_tools.h"
#include "create_dir.h"
#include "category.h"
#include "macros.h"
#include "copy_stream.h"
#define MAX_LINE 1024
#define OUTLIER_DIR "outliers"
#define OUTLIER_N 5
#define MAX_P 1.00
#define value_at_index(h, idx) (value_of_field((h)->summaries_sorted[(idx)], (h)->resource))
int gnuplots_running = 0;
static int width = 900;
static int height = 600;
static int width_thumb = 372; //186;
static int height_thumb = 248; //124;
char *format = "png";
char *gnuplot_path = "gnuplot";
int webpage_mode = 1;
char *output_directory = NULL;
struct histogram {
struct field *resource;
char *units;
struct rmDsummary_set *source;
struct rmDsummary **summaries_sorted;
int total_count;
double bin_size;
double z_95;
double z_99;
double min_value;
double max_value;
uint64_t count_at_min_value;
uint64_t count_at_max_value;
uint64_t max_count; //i.e., how many times the mode occurs.
uint64_t min_count;
double value_at_max_count; //i.e., mode
double value_at_min_count;
double mean;
double variance;
double std_dev;
double kurtosis;
double skewdness;
uint64_t first_allocation_histogram;
uint64_t first_allocation_bruteforce;
struct itable *buckets;
uint64_t nbuckets;
char *output_directory;
};
struct list *all_sets;
struct rmDsummary *max_values;
struct rmDsummary_set *all_summaries;
struct hash_table *unique_strings;
int split_categories = 0;
struct hash_table *categories;
int brute_force = 0;
char *unique_string(char *str)
{
char *tmp = hash_table_lookup(unique_strings, str);
if(tmp)
return tmp;
else
{
tmp = xxstrdup(str);
hash_table_insert(unique_strings, str, tmp);
}
return tmp;
}
void split_summaries_on_category(struct rmDsummary_set *source)
{
struct itable *splits = itable_create(0);
struct rmDsummary *s;
struct rmDsummary_set *bucket;
char *label;
list_first_item(source->summaries);
while((s = list_next_item(source->summaries)))
{
label = unique_string(s->category);
bucket = itable_lookup(splits, (uint64_t) ((uintptr_t) label));
if(!bucket)
{
bucket = make_new_set(label);
itable_insert(splits, (uint64_t) ((uintptr_t) label), bucket);
list_push_tail(all_sets, bucket);
}
list_push_tail(bucket->summaries, s);
}
itable_delete(splits);
}
static struct field *sort_field;
int less_than(const void *a, const void *b)
{
struct rmDsummary *sa = * (struct rmDsummary * const *) a;
struct rmDsummary *sb = * (struct rmDsummary * const *) b;
double fa = value_of_field(sa, sort_field);
double fb = value_of_field(sb, sort_field);
return (fa > fb);
}
void sort_by_field(struct histogram *h, struct field *f)
{
sort_field = f;
qsort(h->summaries_sorted, h->total_count, sizeof(struct rmDsummary *), less_than);
}
int index_of_p(struct histogram *h, double p)
{
return (int) ceil((h->total_count - 1) * p);
}
double value_of_p(struct histogram *h, double p)
{
return value_at_index(h, index_of_p(h, p));
}
double set_bin_size_by_iqr(struct histogram *h)
{
double v_25 = value_of_p(h, 0.25);
double v_75 = value_of_p(h, 0.75);
if(v_75 > v_25)
return h->bin_size = 2*(v_75 - v_25)*pow((double) h->total_count, (-1.0/3.0));
else
return h->bin_size = 1.0;
}
uint64_t get_bucket_count(struct histogram *h, uint64_t bucket)
{
return (uint64_t) ((uintptr_t) itable_lookup(h->buckets, bucket + 1));
}
double get_bucket_value(struct histogram *h, uint64_t bucket)
{
return h->bin_size * (bucket);
}
uint64_t bucket_of(struct histogram *h, double value)
{
return (uint64_t) floor(value/h->bin_size);
}
uint64_t increment_bucket(struct histogram *h, double value)
{
uint64_t bucket = bucket_of(h, value);
uint64_t count = get_bucket_count(h, bucket);
count += 1;
itable_insert(h->buckets, bucket + 1, (void *) ((uintptr_t) count));
return count;
}
void set_min_max_value_of_field(struct histogram *h, struct field *f)
{
h->min_value = value_of_field(h->summaries_sorted[0], f);
h->max_value = value_of_field(h->summaries_sorted[h->total_count - 1], f);
h->count_at_min_value = (uintptr_t) get_bucket_count(h, bucket_of(h, h->min_value));
h->count_at_max_value = (uintptr_t) get_bucket_count(h, bucket_of(h, h->max_value));
}
double set_average_of_field(struct histogram *h, struct field *f)
{
double accum = 0;
int i;
for(i = 0; i < h->total_count; i++)
accum += value_of_field(h->summaries_sorted[i], f);
h->mean = accum/h->total_count;
return h->mean;
}
double set_variance_of_field(struct histogram *h, struct field *f)
{
double accum = 0;
int i;
for(i = 0; i < h->total_count; i++)
accum += pow(value_of_field(h->summaries_sorted[i], f) - h->mean, 2);
if(h->total_count > 1)
{
h->variance = accum/(h->total_count - 1);
h->std_dev = sqrt(h->variance);
}
else
{
h->variance = -1;
h->std_dev = -1;
}
return h->variance;
}
double set_skewdness_of_field(struct histogram *h, struct field *f)
{
double accum = 0;
int i;
for(i = 0; i < h->total_count; i++)
accum += pow(value_of_field(h->summaries_sorted[i], f) - h->mean, 3);
if(h->total_count > 1 && h->variance != 0)
h->skewdness = (accum/(pow(h->std_dev, 3) * (h->total_count - 1)));
else
h->skewdness = 0;
return h->skewdness;
}
double set_kurtosis_of_field(struct histogram *h, struct field *f)
{
double accum = 0;
int i;
for(i = 0; i < h->total_count; i++)
accum += pow(value_of_field(h->summaries_sorted[i], f) - h->mean, 4);
if(h->total_count > 1 && h->variance != 0)
h->kurtosis = (accum/(pow(h->variance, 2) * (h->total_count - 1))) - 3;
else
h->kurtosis = 0;
return h->kurtosis;
}
void set_z_scores(struct histogram *h)
{
//one tail
h->z_95 = h->mean + h->std_dev * 1.645;
h->z_99 = h->mean + h->std_dev * 2.33;
}
uint64_t set_min_max_count(struct histogram *h)
{
uint64_t bucket;
uint64_t count;
h->max_count = 0;
h->min_count = INT_MAX;
h->value_at_max_count = 0;
itable_firstkey(h->buckets);
while(itable_nextkey(h->buckets, &bucket, (void *) &count))
{
if(count > h->max_count)
{
h->value_at_max_count = get_bucket_value(h, bucket - 1);
h->max_count = count;
}
if(count < h->min_count)
{
h->value_at_min_count = get_bucket_value(h, bucket - 1);
h->min_count = count;
}
}
return h->max_count;
}
char *path_common(struct histogram *h, int only_base_name)
{
char *category = sanitize_path_name(h->source->category);
char *prefix;
if(only_base_name)
{
prefix = "";
}
else
{
prefix = h->output_directory;
}
char *path = string_format("%s%s_%s", prefix, category, h->resource->name);
free(category);
return path;
}
char *path_of_table(struct histogram *h, int only_base_name)
{
char *common = path_common(h, only_base_name);
char *path = string_format("%s_table.data", common);
free(common);
return path;
}
char *path_of_variables_script(struct histogram *h, int only_base_name)
{
char *common = path_common(h, only_base_name);
char *path = string_format("%s_vars.gnuplot", common);
free(common);
return path;
}
char *path_of_thumbnail_script(struct histogram *h, int only_base_name)
{
char *common = path_common(h, only_base_name);
char *path = string_format("%s_%dx%d.gnuplot", common, width_thumb, height_thumb);
free(common);
return path;
}
char *path_of_thumbnail_image(struct histogram *h, int only_base_name)
{
char *common = path_common(h, only_base_name);
char *path = string_format("%s_%dx%d.%s", common, width_thumb, height_thumb, format);
free(common);
return path;
}
char *path_of_image_script(struct histogram *h, int only_base_name)
{
char *common = path_common(h, only_base_name);
char *path = string_format("%s_%dx%d.gnuplot", common, width, height);
free(common);
return path;
}
char *path_of_image(struct histogram *h, int only_base_name)
{
char *common = path_common(h, only_base_name);
char *path = string_format("%s_%dx%d.%s", common, width, height, format);
free(common);
return path;
}
char *path_of_page(struct histogram *h, int only_base_name)
{
char *common = path_common(h, only_base_name);
char *path = string_format("%s.html", common);
free(common);
return path;
}
void create_output_directory(struct histogram *h)
{
char *category = sanitize_path_name(h->source->category);
char *all_path = string_format("%s/%s/", output_directory, category);
if(create_dir(all_path, 0755) < 0 && errno != EEXIST)
fatal("Could not create directory: %s\n", all_path);
h->output_directory = all_path;
free(category);
}
FILE *open_file(char *filename)
{
FILE *file = fopen(filename, "w");
if(!file)
fatal("Could not open file for writing: %s\n", filename);
return file;
}
void write_histogram_table(struct histogram *h)
{
char *fname = path_of_table(h, 0);
FILE *f = open_file(fname);
free(fname);
uint64_t bucket;
uint64_t count;
itable_firstkey(h->buckets);
while(itable_nextkey(h->buckets, &bucket, (void *) &count))
fprintf(f, "%lf %" PRIu64 "\n", get_bucket_value(h, bucket - 1), count);
fclose(f);
}
void write_variables_gnuplot(struct histogram *h, struct histogram *all)
{
char *fname = path_of_variables_script(h, 0);
FILE *f = open_file(fname);
free(fname);
fprintf(f, "%s = %lf\n", "current_minimum", h->min_value);
fprintf(f, "%s = %lf\n", "current_maximum", h->max_value);
fprintf(f, "%s = %lf\n", "current_mode", h->value_at_max_count);
fprintf(f, "%s = %" PRId64"\n", "current_mode_count", h->max_count);
fprintf(f, "%s = %lf\n", "current_mean", h->mean);
fprintf(f, "%s = %lf\n", "current_percentile75", value_of_p(h, 0.75));
fprintf(f, "%s = %lf\n", "current_percentile25", value_of_p(h, 0.25));
fprintf(f, "%s = %" PRId64"\n", "current_first_allocation", h->first_allocation_histogram);
fprintf(f, "%s = %lf\n", "current_bin_size", h->bin_size);
if(all) {
fprintf(f, "%s = %lf\n", "all_minimum", all->min_value);
fprintf(f, "%s = %lf\n", "all_maximum", all->max_value);
fprintf(f, "%s = %lf\n", "all_mode", all->value_at_max_count);
fprintf(f, "%s = %" PRId64"\n", "all_mode_count", all->max_count);
fprintf(f, "%s = %lf\n", "all_mean", all->mean);
fprintf(f, "%s = %lf\n", "all_percentile75", value_of_p(all, 0.75));
fprintf(f, "%s = %lf\n", "all_percentile25", value_of_p(all, 0.25));
fprintf(f, "%s = %" PRId64"\n", "all_first_allocation", all->first_allocation_histogram);
}
fclose(f);
}
void write_thumbnail_gnuplot(struct histogram *h, struct histogram *all)
{
char *fname = path_of_thumbnail_script(h, 0);
FILE *f = open_file(fname);
free(fname);
fname = path_of_variables_script(h, 1);
fprintf(f, "load \"%s\"\n", fname);
free(fname);
fprintf(f, "set terminal pngcairo truecolor rounded size %d,%d enhanced font \"times,10\"\n",
width_thumb, height_thumb);
fname = path_of_thumbnail_image(h, 1);
fprintf(f, "set output \"%s\"\n", fname);
free(fname);
fprintf(f, "unset key\n");
fprintf(f, "unset border\n");
fprintf(f, "set style line 1 lc 16\n");
fprintf(f, "set style fill solid noborder 0.45\n");
fprintf(f, "set tmargin 2\n");
fprintf(f, "set bmargin 2\n");
fprintf(f, "unset tics\n");
fprintf(f, "set arrow from current_minimum,graph -0.01 to current_percentile25,graph -0.01 nohead lc 16\n");
fprintf(f, "set arrow from current_percentile75,graph -0.01 to current_maximum,graph -0.01 nohead lc 16\n");
/* square for mean */
fprintf(f, "set label \"\" at current_mean,graph 0.00 tc ls 1 center front point pt 4\n");
/* up triangle for mode */
fprintf(f, "set label sprintf(\"%%.0f\", current_mode) at current_mode,graph -0.05 tc ls 1 center front point pt 8 offset 0,character -0.90\n");
/* down triangle for first allocation */
fprintf(f, "set label \"\" at current_first_allocation,graph -0.025 tc ls 1 center front point pt 10\n");
if(h == all)
{
fprintf(f, "set label sprintf(\"%%.0f\", all_minimum) at all_minimum,graph -0.01 tc ls 1 right front nopoint offset character -1.0,character -0.25\n");
fprintf(f, "set label sprintf(\"%%.0f\", all_maximum) at all_maximum,graph -0.01 tc ls 1 left front nopoint offset character 1.0,character -0.25\n");
}
if( all->nbuckets == 1 )
{
fprintf(f, "set boxwidth 1.0*(all_maximum - all_minimum + 1)/50 absolute\n");
fprintf(f, "set xrange [all_minimum - 1 : all_maximum + 2]\n");
}
else
{
fprintf(f, "gap = (all_maximum - all_minimum)/5.0\n");
fprintf(f, "set boxwidth (0.1 > current_bin_size ? 0.1 : current_bin_size) absolute\n");
fprintf(f, "set xrange [all_minimum - gap : all_maximum + gap]\n");
}
char *table_name = path_of_table(h, 1);
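/* if the bucket counts span more than four orders of magnitude, switch the y-axis to log10 so sparse buckets remain visible next to the mode */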
if(all->max_count > 10000*all->min_count)
{
fprintf(f, "set yrange [0:(log10(all_mode_count))]\n");
fprintf(f, "set label sprintf(\"log(%%d)\",current_mode_count) at current_mode,(log10(current_mode_count)) tc ls 1 left front nopoint offset 0,character 0.5\n");
fprintf(f, "plot \"%s\" using 1:(log10($2)) w boxes\n", table_name);
}
else
{
fprintf(f, "set yrange [0:all_mode_count]\n");
fprintf(f, "set label sprintf(\"%%d\", current_mode_count) at current_mode,current_mode_count tc ls 1 left front nopoint offset 0,character 0.5\n");
fprintf(f, "plot \"%s\" using 1:2 w boxes\n", table_name);
}
free(table_name);
fprintf(f, "\n");
fclose(f);
}
void write_image_gnuplot(struct histogram *h, struct histogram *all)
{
char *fname = path_of_image_script(h, 0);
FILE *f = open_file(fname);
free(fname);
fname = path_of_variables_script(h, 1);
fprintf(f, "load \"%s\"\n", fname);
free(fname);
fprintf(f, "set terminal pngcairo truecolor rounded size %d,%d enhanced font \"times,12\"\n",
width, height);
fname = path_of_image(h, 1);
fprintf(f, "set output \"%s\"\n", fname);
free(fname);
fprintf(f, "unset key\n");
fprintf(f, "unset border\n");
fprintf(f, "set style line 1 lc 16\n");
fprintf(f, "set style fill solid noborder 0.45\n");
fprintf(f, "set tmargin 2\n");
fprintf(f, "set bmargin 2\n");
fprintf(f, "unset tics\n");
fprintf(f, "set arrow from current_minimum,graph -0.01 to current_percentile25,graph -0.01 nohead lc 16\n");
fprintf(f, "set arrow from current_percentile75,graph -0.01 to current_maximum,graph -0.01 nohead lc 16\n");
/* square for mean */
fprintf(f, "set label \"\" at current_mean,graph -0.00 tc ls 1 center front point pt 4\n");
/* up triangle for mode */
fprintf(f, "set label sprintf(\"%%.0f\", current_mode) at current_mode,graph -0.05 tc ls 1 center front point pt 8 offset 0,character -0.90\n");
/* down triangle for first allocation */
fprintf(f, "set label \"\" at current_first_allocation,graph -0.025 tc ls 1 center front point pt 10\n");
fprintf(f, "set label sprintf(\"%%.0f\", all_minimum) at all_minimum,graph -0.01 tc ls 1 right front nopoint offset character -1.0,character -0.25\n");
fprintf(f, "set label sprintf(\"%%.0f\", all_maximum) at all_maximum,graph -0.01 tc ls 1 left front nopoint offset character 1.0,character -0.25\n");
if( all->nbuckets == 1 )
{
fprintf(f, "set boxwidth (all_maximum - all_minimum + 1)/50 absolute\n");
fprintf(f, "set xrange [all_minimum - 1 : all_maximum + 2]\n");
}
else
{
fprintf(f, "gap = (all_maximum - all_minimum)/5.0\n");
fprintf(f, "set boxwidth (0.1 > current_bin_size ? 0.1 : current_bin_size) absolute\n");
fprintf(f, "set xrange [all_minimum - gap : all_maximum + gap]\n");
}
char *table_name = path_of_table(h, 1);
if(h->max_count > 10000*h->min_count)
{
fprintf(f, "set yrange [0:(log10(all_mode_count))]\n");
fprintf(f, "set label sprintf(\"log(%%d)\",current_mode_count) at current_mode,(log10(current_mode_count)) tc ls 1 left front nopoint offset 0,character 0.5\n");
fprintf(f, "plot \"%s\" using 1:(log10($2)) w boxes\n", table_name);
}
else
{
fprintf(f, "set yrange [0:all_mode_count]\n");
fprintf(f, "set label sprintf(\"%%d\", current_mode_count) at current_mode,current_mode_count tc ls 1 left front nopoint offset 0,character 0.5\n");
fprintf(f, "plot \"%s\" using 1:2 w boxes\n", table_name);
}
free(table_name);
fprintf(f, "\n");
fclose(f);
}
void write_images(struct histogram *h)
{
pid_t pid;
pid = fork();
if(pid < 0)
{
fatal("Could not fork when creating thumbnail: %s\n", path_of_thumbnail_image(h, 0));
}
if(pid == 0) {
char *path = string_format("%s/%s", output_directory, sanitize_path_name(h->source->category));
chdir(path);
execlp(gnuplot_path, "gnuplot", path_of_thumbnail_script(h, 1), NULL);
fatal("Could not exec when creating thumbnail: %s\n", path_of_thumbnail_image(h, 0));
}
pid = fork();
if(pid < 0)
{
fatal("Could not fork when creating image: %s\n", path_of_image(h, 0));
}
if(pid == 0) {
char *path = string_format("%s/%s", output_directory, sanitize_path_name(h->source->category));
chdir(path);
execlp(gnuplot_path, "gnuplot", path_of_image_script(h, 1), NULL);
fatal("Could not exec when creating image: %s\n", path_of_image(h, 0));
}
gnuplots_running += 2;
}
struct histogram *histogram_of_field(struct rmDsummary_set *source, struct field *f, char *out_dir)
{
struct histogram *h = malloc(sizeof(struct histogram));
h->total_count = list_size(source->summaries);
h->summaries_sorted = malloc(h->total_count * sizeof(struct rmDsummary *));
struct rmDsummary *s;
list_first_item(source->summaries);
int i = 0;
while((s = list_next_item(source->summaries)))
{
h->summaries_sorted[i] = s;
i++;
}
sort_by_field(h, f);
h->source = source;
h->buckets = itable_create(0);
h->resource = f;
create_output_directory(h);
set_bin_size_by_iqr(h);
double value;
list_first_item(source->summaries);
while((s = list_next_item(source->summaries)))
{
value = value_of_field(s, f);
increment_bucket(h, value);
}
h->nbuckets = itable_size(h->buckets);
set_min_max_value_of_field(h, f);
set_min_max_count(h);
set_average_of_field(h, f);
set_variance_of_field(h, f);
set_skewdness_of_field(h,f);
set_kurtosis_of_field(h, f);
set_z_scores(h);
itable_insert(source->histograms, (uint64_t) ((uintptr_t) f), (void *) h);
debug(D_RMON, "%s-%s:\n buckets: %" PRIu64 " bin_size: %lf max_count: %" PRIu64 " mode: %lf\n", h->source->category, h->resource->caption, h->nbuckets, h->bin_size, h->max_count, h->value_at_max_count);
return h;
}
void write_histogram_stats_header(FILE *stream)
{
fprintf(stream, "%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
"resource",
"n",
"mean", "std_dev", "skewd", "kurtos",
"max", "min", "first_alloc",
"p_25", "p_50", "p_75", "p_95", "p_99",
"z_95", "z_99"
);
}
void write_histogram_stats(FILE *stream, struct histogram *h)
{
char *resource_no_spaces = sanitize_path_name(h->resource->name);
fprintf(stream, "%s %d %.3lf %.3lf %.3lf %.3lf %.3lf %.3lf %" PRId64 " %.3lf %.3lf %.3lf %.3lf %.3lf %.3lf %.3lf\n",
resource_no_spaces,
h->total_count,
h->mean, h->std_dev, h->skewdness, h->kurtosis,
h->max_value, h->min_value, h->first_allocation_histogram,
value_of_p(h, 0.25),
value_of_p(h, 0.50),
value_of_p(h, 0.75),
value_of_p(h, 0.95),
value_of_p(h, 0.99),
h->z_95,
h->z_99);
free(resource_no_spaces);
}
void histograms_of_category(struct rmDsummary_set *ss)
{
/* construct histograms of category across all resources */
struct field *f;
for(f = &fields[WALL_TIME]; f->name != NULL; f++)
{
if(!f->active)
continue;
histogram_of_field(ss, f, output_directory);
}
}
void plots_of_category(struct rmDsummary_set *s)
{
struct histogram *h;
/* construct histograms of category across all resources */
struct field *f;
for(f = &fields[WALL_TIME]; f->name != NULL; f++)
{
if(!f->active)
continue;
h = itable_lookup(s->histograms, (uint64_t) ((uintptr_t) f));
struct histogram *all = itable_lookup(all_summaries->histograms, (uint64_t) ((uintptr_t) f));
if(!all)
all = h;
write_histogram_table(h);
write_variables_gnuplot(h, all);
write_thumbnail_gnuplot(h, all);
write_image_gnuplot(h, all);
write_images(h);
}
while(gnuplots_running)
{
wait(NULL);
gnuplots_running--;
}
}
void find_first_allocation_of_category_histogram(struct rmDsummary_set *s, struct hash_table *categories) {
struct field *f;
struct histogram *h;
struct category *c = category_lookup_or_create(categories, s->category);
if(!c->max_allocation)
c->max_allocation = rmsummary_create(-1);
for(f = &fields[WALL_TIME]; f->name != NULL; f++)
{
if(!f->active)
continue;
h = itable_lookup(s->histograms, (uint64_t) ((uintptr_t) f));
int64_t value;
rmsummary_to_internal_unit(f->name, value_of_p(h, MAX_P), &value, f->units);
rmsummary_assign_int_field(c->max_allocation, f->name, value);
}
category_update_first_allocation(categories, s->category);
for(f = &fields[WALL_TIME]; f->name != NULL; f++)
{
if(!f->active)
continue;
h = itable_lookup(s->histograms, (uint64_t) ((uintptr_t) f));
h->first_allocation_histogram = -1;
if(c->first_allocation) {
int64_t first = rmsummary_get_int_field(c->first_allocation, f->name);
h->first_allocation_histogram = rmsummary_to_external_unit(f->name, first);
}
}
}
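/* brute-force first allocation: for every observed value of the field, compute the total waste of using it as the initial allocation (unused headroom for tasks that fit, plus the cost of retrying at the maximum for tasks that exceed it), and keep the candidate with the least waste */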
void find_first_allocation_of_field_bruteforce(struct rmDsummary_set *s, struct field *f) {
struct histogram *h = itable_lookup(s->histograms, (uint64_t) ((uintptr_t) f));
double min_waste = DBL_MAX;
int64_t min_candidate = value_of_p(h, MAX_P);
int64_t max_candidate = value_of_p(h, MAX_P);
uint64_t prev = 0;
for(int i = 0; i < h->total_count; i++) {
uint64_t candidate = value_of_field(h->summaries_sorted[i], f);
double candidate_waste = 0;
if( i > 0 ) {
if(candidate - prev < 1)
continue;
}
for(int j = 0; j < h->total_count; j+=1) {
double current = value_of_field(h->summaries_sorted[j], f);
double wall_time;
if(f->cummulative) {
wall_time = 1;
} else {
wall_time = h->summaries_sorted[j]->wall_time;
}
if(max_candidate < current)
continue;
double current_waste;
if(current > candidate) {
current_waste = (max_candidate - current + candidate)*wall_time;
} else {
current_waste = (candidate - current)*wall_time;
}
candidate_waste += current_waste;
if(candidate_waste > min_waste)
break;
}
if(candidate_waste < min_waste) {
min_candidate = candidate;
min_waste = candidate_waste;
}
prev = candidate;
}
debug(D_RMON, "first allocation '%s' brute force: %" PRId64, f->caption, min_candidate);
h->first_allocation_bruteforce = min_candidate;
}
void find_first_allocation_of_category_bruteforce(struct rmDsummary_set *s) {
int i = WALL_TIME;
int n = NUM_FIELDS;
#pragma omp parallel for schedule(dynamic,1) private(i)
for(i = WALL_TIME; i < n; i++)
{
struct field *f = (fields + i);
if(!f->active)
continue;
find_first_allocation_of_field_bruteforce(s, f);
}
}
void write_stats_of_category(struct rmDsummary_set *s)
{
char *f_stats_raw = sanitize_path_name(s->category);
char *filename = string_format("%s/%s.stats", output_directory, f_stats_raw);
FILE *f_stats = open_file(filename);
free(f_stats_raw);
free(filename);
write_histogram_stats_header(f_stats);
struct histogram *h;
struct field *f;
for(f = &fields[WALL_TIME]; f->name != NULL; f++)
{
if(!f->active)
continue;
h = itable_lookup(s->histograms, (uint64_t) ((uintptr_t) f));
write_histogram_stats(f_stats, h);
}
fclose(f_stats);
}
void write_limits_of_category(struct rmDsummary_set *s, double p_cut)
{
char *f_stats_raw = sanitize_path_name(s->category);
char *filename = string_format("%s/%s.limits", output_directory, f_stats_raw);
FILE *f_limits = open_file(filename);
free(filename);
free(f_stats_raw);
struct field *f;
struct histogram *h;
for(f = &fields[WALL_TIME]; f->name != NULL; f++)
{
if(!f->active)
continue;
h = itable_lookup(s->histograms, (uint64_t) ((uintptr_t) f));
fprintf(f_limits, "%s: %" PRId64 "\n", f->name, (int64_t) ceil(value_of_p(h, p_cut)));
}
fclose(f_limits);
}
char *copy_outlier(struct rmDsummary *s)
{
static int count = 0;
count++;
char *base = string_format("outlier-%d.summary", count);
char *outlier = string_format("%s/%s/%s", output_directory, OUTLIER_DIR, base);
char dir[PATH_MAX];
path_dirname(outlier, dir);
create_dir(dir, S_IRWXU);
FILE *output = fopen(outlier, "w");
if(output) {
rmDsummary_print(output, s);
fclose(output);
} else {
debug(D_NOTICE, "Could not create outlier summary: %s\n", outlier);
outlier = NULL;
}
free(outlier);
return base;
}
void write_outlier(FILE *stream, struct rmDsummary *s, struct field *f, char *prefix)
{
char *outlier_name;
outlier_name = copy_outlier(s);
if(!outlier_name)
return;
if(!prefix)
{
prefix = "";
}
fprintf(stream, "<td class=\"data\">\n");
fprintf(stream, "<a href=%s%s/%s>(%s)</a>", prefix, OUTLIER_DIR, outlier_name, s->task_id);
fprintf(stream, "<br><br>\n");
fprintf(stream, "%d\n", (int) value_of_field(s, f));
fprintf(stream, "</td>\n");
}
void write_css_style(FILE *stream)
{
fprintf(stream,
"\n<style media=\"screen\" type=\"text/css\">\n"
"table { font-size: small; border-collapse: collapse; }\n"
"td { text-align: right; padding: 5px; border: 1px solid rgb(216,216,216); }\n"
"td.datahdr { text-align: center; border-top: 0; }\n"
"td.task { text-align: left; border-right: 0; }\n"
"td.data { text-align: center; border-left: 0; }\n"
"\n</style>\n"
);
}
void write_webpage_stats_header(FILE *stream, struct histogram *h)
{
fprintf(stream, "<td class=\"data\">%s", h->resource->caption);
if(h->resource->units)
{
fprintf(stream, " (%s)", h->resource->units);
}
fprintf(stream, "</td>");
fprintf(stream, "<td class=\"datahdr\" >mode <br> ▵</td>");
fprintf(stream, "<td class=\"datahdr\" >μ <br> ▫ </td>");
fprintf(stream, "<td class=\"datahdr\" >1<sup>st</sup> alloc.<br> ▿ </td>");
if(brute_force) {
fprintf(stream, "<td class=\"datahdr\" >1<sup>st</sup> alloc. b.f.</td>");
}
fprintf(stream, "<td class=\"datahdr\" >σ/μ</td>");
fprintf(stream, "<td class=\"datahdr\" >p<sub>99</sub></td>");
fprintf(stream, "<td class=\"datahdr\" >p<sub>95</sub></td>");
}
void write_webpage_stats(FILE *stream, struct histogram *h, char *prefix, int include_thumbnail)
{
struct field *f = h->resource;
fprintf(stream, "<td>");
if(include_thumbnail)
{
fprintf(stream, "<a href=\"../%s\">", path_of_page(h, 0));
fprintf(stream, "<img src=\"../%s\">", path_of_thumbnail_image(h, 0));
fprintf(stream, "</a>");
}
fprintf(stream, "</td>");
fprintf(stream, "<td class=\"data\"> -- <br><br>\n");
fprintf(stream, "%6.0lf\n", h->value_at_max_count);
fprintf(stream, "</td>\n");
fprintf(stream, "<td class=\"data\"> -- <br><br>\n");
fprintf(stream, "%6.0lf\n", h->mean);
fprintf(stream, "</td>\n");
fprintf(stream, "<td class=\"data\"> -- <br><br>\n");
fprintf(stream, "%" PRId64 "\n", h->first_allocation_histogram);
fprintf(stream, "</td>\n");
if(brute_force) {
fprintf(stream, "<td class=\"data\"> -- <br><br>\n");
fprintf(stream, "%" PRId64 "\n", h->first_allocation_bruteforce);
fprintf(stream, "</td>\n");
}
fprintf(stream, "<td class=\"data\"> -- <br><br>\n");
fprintf(stream, "%6.2lf\n", h->mean > 0 ? h->std_dev/h->mean : -1);
fprintf(stream, "</td>\n");
struct rmDsummary *s;
s = h->summaries_sorted[index_of_p(h, 0.99)];
write_outlier(stream, s, f, prefix);
s = h->summaries_sorted[index_of_p(h, 0.95)];
write_outlier(stream, s, f, prefix);
}
void write_individual_histogram_webpage(struct histogram *h)
{
char *fname = path_of_page(h, 0);
FILE *fo = open_file(fname);
free(fname);
struct field *f = h->resource;
fprintf(fo, "<head>\n");
fprintf(fo, "<title> %s : %s </title>\n", h->source->category, f->caption);
write_css_style(fo);
fprintf(fo, "</head>\n");
fprintf(fo, "<body>\n");
fprintf(fo, "<tr>\n");
fprintf(fo, "<table>\n");
fprintf(fo, "<td rowspan=\"%d\">\n", OUTLIER_N + 2);
fprintf(fo, "<img src=\"%s\">", path_of_image(h, 1));
fprintf(fo, "</td>\n");
fprintf(fo, "</tr>\n");
fprintf(fo, "<tr>\n");
fprintf(fo, "<td class=\"data\"> maxs </td> <td> </td> <td class=\"data\"> mins </td>\n");
fprintf(fo, "</tr>\n");
int i;
struct rmDsummary *s;
int outliers = h->total_count < OUTLIER_N ? h->total_count : OUTLIER_N;
for(i = 0; i < outliers; i++)
{
fprintf(fo, "<tr>\n");
s = h->summaries_sorted[h->total_count - i - 1];
write_outlier(fo, s, f, "../");
fprintf(fo, "<td> </td>");
s = h->summaries_sorted[i];
write_outlier(fo, s, f, "../");
fprintf(fo, "</tr>\n");
}
fprintf(fo, "</table>\n");
fprintf(fo, "<table>\n");
fprintf(fo, "<tr>\n");
write_webpage_stats_header(fo, h);
fprintf(fo, "</tr>\n");
fprintf(fo, "<tr>\n");
write_webpage_stats(fo, h, "../", 0);
fprintf(fo, "</tr>\n");
fprintf(fo, "</table>\n");
fprintf(fo, "</body>\n");
fclose(fo);
}
void write_front_page(char *workflow_name)
{
FILE *fo;
char *filename = string_format("%s/index.html", output_directory);
fo = fopen(filename, "w");
int columns = brute_force ? 8 : 7;
if(!fo)
fatal("Could not open file %s for writing: %s\n", strerror(errno));
fprintf(fo, "<head>\n");
fprintf(fo, "<title> %s </title>\n", workflow_name);
write_css_style(fo);
fprintf(fo, "</head>\n");
fprintf(fo, "<body>\n");
fprintf(fo, "<table>\n");
fprintf(fo, "<tr>\n");
struct rmDsummary_set *s;
list_first_item(all_sets);
while((s = list_next_item(all_sets)))
{
fprintf(fo, "<td class=\"datahdr\" colspan=\"%d\">%s: %d</td>", columns, s->category, list_size(s->summaries));
}
fprintf(fo, "</tr>\n");
struct field *f;
struct histogram *h;
for(f = &fields[WALL_TIME]; f->name != NULL; f++)
{
if(!f->active)
continue;
fprintf(fo, "<tr>\n");
list_first_item(all_sets);
while((s = list_next_item(all_sets)))
{
h = itable_lookup(s->histograms, (uint64_t) ((uintptr_t) f));
write_webpage_stats_header(fo, h);
}
fprintf(fo, "</tr>\n");
fprintf(fo, "<tr>\n");
list_first_item(all_sets);
while((s = list_next_item(all_sets)))
{
h = itable_lookup(s->histograms, (uint64_t) ((uintptr_t) f));
write_webpage_stats(fo, h, NULL, 1);
}
fprintf(fo, "</tr>\n");
}
fprintf(fo, "</table>\n");
fprintf(fo, "</body>\n");
}
void write_webpage(char *workflow_name)
{
write_front_page(workflow_name);
struct rmDsummary_set *s;
list_first_item(all_sets);
while((s = list_next_item(all_sets)))
{
struct histogram *h;
struct field *f;
for(f = &fields[WALL_TIME]; f->name != NULL; f++)
{
if(!f->active)
continue;
h = itable_lookup(s->histograms, (uint64_t) ((uintptr_t) f));
write_individual_histogram_webpage(h);
}
}
}
static void show_usage(const char *cmd)
{
fprintf(stdout, "\nUse: %s [options] output_directory [workflow_name]\n\n", cmd);
fprintf(stdout, "\nIf no -D or -L are specified, read the summary file list from standard input.\n\n");
fprintf(stdout, "%-20s Enable debugging for this subsystem.\n", "-d <subsystem>");
fprintf(stdout, "%-20s Send debugging to this file. (can also be :stderr, :stdout, :syslog, or :journal)\n", "-o <file>");
fprintf(stdout, "%-20s Read summaries recursively from <dir> (filename of form '%s[0-9]+%s').\n", "-D <dir>", RULE_PREFIX, RULE_SUFFIX);
fprintf(stdout, "%-20s Read summaries filenames from file <list>.\n", "-L <list>");
fprintf(stdout, "%-20s Split on task categories.\n", "-s");
fprintf(stdout, "%-20s Use brute force to compute proposed resource allocations.\n", "-b");
fprintf(stdout, "%-20s Select these fields for the histograms. (Default is: tcvmsrwhz).\n\n", "-f <fields>");
fprintf(stdout, "<fields> is a string in which each character should be one of the following:\n");
fprintf(stdout, "%s", make_field_names_str("\n"));
fprintf(stdout, "%-20s Show this message.\n", "-h,--help");
}
int main(int argc, char **argv)
{
char *input_directory = NULL;
char *input_list = NULL;
char *workflow_name = NULL;
unique_strings = hash_table_create(0, 0);
debug_config(argv[0]);
signed char c;
while( (c = getopt(argc, argv, "bD:d:f:hL:o:s")) > -1 )
{
switch(c)
{
case 'D':
input_directory = xxstrdup(optarg);
break;
case 'L':
input_list = xxstrdup(optarg);
break;
case 'd':
debug_flags_set(optarg);
break;
case 'o':
debug_config_file(optarg);
break;
case 'f':
parse_fields_options(optarg);
break;
case 's':
split_categories = 1;
break;
case 'b':
brute_force = 1;
break;
case 'h':
show_usage(argv[0]);
exit(0);
break;
default:
show_usage(argv[0]);
exit(1);
break;
}
}
if(argc - optind < 1)
{
show_usage(argv[0]);
exit(1);
}
if(!input_directory && !input_list)
{
input_list = "-";
}
output_directory = argv[optind];
char *outlier_dir = string_format("%s/%s", output_directory, OUTLIER_DIR);
if(create_dir(outlier_dir, 0755) < 0 && errno != EEXIST)
fatal("Could not create outliers directory.");
free(outlier_dir);
if(argc - optind > 1)
{
workflow_name = argv[optind + 1];
}
else
{
workflow_name = output_directory;
}
categories = hash_table_create(0, 0);
all_sets = list_create();
/* read and parse all input summaries */
all_summaries = make_new_set(ALL_SUMMARIES_CATEGORY);
if(input_directory)
{
parse_summary_recursive(all_summaries, input_directory, categories);
}
if(input_list)
{
parse_summary_from_filelist(all_summaries, input_list, categories);
}
list_push_head(all_sets, all_summaries);
if(list_size(all_summaries->summaries) > 0)
{
if(split_categories)
{
/* partition summaries on category name */
split_summaries_on_category(all_summaries);
}
/* construct histograms across all categories/resources. */
struct rmDsummary_set *s;
list_first_item(all_sets);
while((s = list_next_item(all_sets)))
{
histograms_of_category(s);
find_first_allocation_of_category_histogram(s, categories);
if(brute_force) {
find_first_allocation_of_category_bruteforce(s);
}
write_stats_of_category(s);
write_limits_of_category(s, 0.95);
if(webpage_mode)
{
plots_of_category(s);
}
}
}
if(webpage_mode)
{
write_webpage(workflow_name);
}
return 0;
}
/* vim: set noexpandtab tabstop=4: */
| 1 | 12,694 | You want "current_min_count" | cooperative-computing-lab-cctools | c |
@@ -85,14 +85,10 @@ public class MediaStoreProvider {
MediaStore.Files.FileColumns.PARENT, MediaStore.Images.Media.BUCKET_DISPLAY_NAME
};
- String selection, selectionArgs[];
-
- selection =
- MediaStore.Files.FileColumns.MEDIA_TYPE
- + "=? ) GROUP BY ( "
- + MediaStore.Files.FileColumns.PARENT
- + " ";
- selectionArgs = new String[] {String.valueOf(MediaStore.Files.FileColumns.MEDIA_TYPE_IMAGE)};
+ String selection = MediaStore.Files.FileColumns.MEDIA_TYPE + "=? ";
+ String[] selectionArgs =
+ new String[] {String.valueOf(MediaStore.Files.FileColumns.MEDIA_TYPE_IMAGE)};
+ String sortOrder = MediaStore.Files.FileColumns.PARENT;
Cursor cur =
context | 1 | package org.fossasia.phimpme.gallery.data.providers;
import android.content.Context;
import android.database.Cursor;
import android.net.Uri;
import android.provider.MediaStore;
import androidx.annotation.Nullable;
import java.io.File;
import java.util.ArrayList;
import java.util.HashSet;
import org.fossasia.phimpme.gallery.data.Album;
import org.fossasia.phimpme.gallery.data.CustomAlbumsHelper;
import org.fossasia.phimpme.gallery.data.Media;
import org.fossasia.phimpme.gallery.data.base.ImageFileFilter;
import org.fossasia.phimpme.gallery.util.ContentHelper;
import org.fossasia.phimpme.gallery.util.StringUtils;
import org.jetbrains.annotations.TestOnly;
/** Created by dnld on 24/07/16. */
public class MediaStoreProvider {
private static ArrayList<String> excludedAlbums;
public static ArrayList<Album> getAlbums(Context context, boolean hidden) {
excludedAlbums = getExcludedFolders(context);
return hidden ? getHiddenAlbums(context) : getAlbums(context);
}
private static ArrayList<Album> getHiddenAlbums(Context context) {
ArrayList<Album> list = new ArrayList<Album>();
String[] projection =
new String[] {MediaStore.Files.FileColumns.DATA, MediaStore.Files.FileColumns.PARENT};
String selection =
MediaStore.Files.FileColumns.MEDIA_TYPE
+ "="
+ MediaStore.Files.FileColumns.MEDIA_TYPE_NONE
+ " and "
+ MediaStore.Files.FileColumns.DATA
+ " LIKE '%.nomedia'";
Cursor cur =
context
.getContentResolver()
.query(MediaStore.Files.getContentUri("external"), projection, selection, null, null);
if (cur != null && cur.moveToFirst()) {
do {
File folder = new File(cur.getString(0)).getParentFile();
File[] files = folder.listFiles(new ImageFileFilter(true));
if (files != null && files.length > 0) {
Album album =
new Album(context, folder.getAbsolutePath(), -1, folder.getName(), files.length);
// TODO: 21/08/16 sort and find?
long lastMod = Long.MIN_VALUE;
File f = null;
for (File file : files) {
if (file.lastModified() > lastMod) {
f = file;
lastMod = file.lastModified();
}
}
if (f != null && !isExcluded(f.getPath(), context)) {
album.addMedia(new Media(f.getPath(), f.lastModified()));
list.add(album);
}
}
} while (cur.moveToNext());
cur.close();
}
return list;
}
private static boolean isExcluded(String path, Context context) {
if (excludedAlbums == null) {
excludedAlbums = getExcludedFolders(context);
}
for (String s : excludedAlbums) if (path.startsWith(s)) return true;
return false;
}
public static ArrayList<Album> getAlbums(Context context) {
ArrayList<Album> list = new ArrayList<Album>();
String[] projection =
new String[] {
MediaStore.Files.FileColumns.PARENT, MediaStore.Images.Media.BUCKET_DISPLAY_NAME
};
String selection, selectionArgs[];
selection =
MediaStore.Files.FileColumns.MEDIA_TYPE
+ "=? ) GROUP BY ( "
+ MediaStore.Files.FileColumns.PARENT
+ " ";
selectionArgs = new String[] {String.valueOf(MediaStore.Files.FileColumns.MEDIA_TYPE_IMAGE)};
Cursor cur =
context
.getContentResolver()
.query(
MediaStore.Files.getContentUri("external"),
projection,
selection,
selectionArgs,
null);
if (cur != null) {
if (cur.moveToFirst()) {
int idColumn = cur.getColumnIndex(MediaStore.Files.FileColumns.PARENT);
int nameColumn = cur.getColumnIndex(MediaStore.Images.Media.BUCKET_DISPLAY_NAME);
do {
Media media = getLastMedia(context, cur.getLong(idColumn));
if (media != null && media.getPath() != null) {
String path = StringUtils.getBucketPathByImagePath(media.getPath());
boolean excluded = isExcluded(path, context);
if (!excluded) {
Album album =
new Album(
context,
path,
cur.getLong(idColumn),
cur.getString(nameColumn),
getAlbumCount(context, cur.getLong(idColumn)));
if (album.addMedia(getLastMedia(context, album.getId()))) list.add(album);
}
}
} while (cur.moveToNext());
}
cur.close();
}
return list;
}
private static ArrayList<String> getExcludedFolders(Context context) {
ArrayList<String> list = new ArrayList<String>();
// forced excluded folder
HashSet<File> storageRoots = ContentHelper.getStorageRoots(context);
for (File file : storageRoots) {
list.add(new File(file.getPath(), "Android").getPath());
}
CustomAlbumsHelper handler = CustomAlbumsHelper.getInstance(context);
list.addAll(handler.getExcludedFoldersPaths());
return list;
}
private static int getAlbumCount(Context context, long id) {
int c = 0;
String selection =
"( "
+ MediaStore.Files.FileColumns.MEDIA_TYPE
+ "=? ) and "
+ MediaStore.Files.FileColumns.PARENT
+ "=?";
String[] selectionArgs =
new String[] {
String.valueOf(MediaStore.Files.FileColumns.MEDIA_TYPE_IMAGE), String.valueOf(id)
};
Cursor cur =
context
.getContentResolver()
.query(
MediaStore.Files.getContentUri("external"),
new String[] {MediaStore.Files.FileColumns.PARENT},
selection,
selectionArgs,
null);
if (cur != null) {
c = cur.getCount();
cur.close();
}
return c;
}
@Nullable
private static Media getLastMedia(Context context, long albumId) {
ArrayList<Media> list = getMedia(context, albumId, 1);
return list.size() > 0 ? list.get(0) : null;
}
public static ArrayList<Media> getMedia(Context context, long albumId) {
return getMedia(context, albumId, -1);
}
private static ArrayList<Media> getMedia(Context context, long albumId, int n) {
String limit = n == -1 ? "" : "LIMIT " + n;
ArrayList<Media> list = new ArrayList<Media>();
String[] projection =
new String[] {
// NOTE: don't change the order!
MediaStore.Images.Media.DATA,
MediaStore.Images.Media.DATE_TAKEN,
MediaStore.Images.Media.MIME_TYPE,
MediaStore.Images.Media.SIZE,
MediaStore.Images.Media.ORIENTATION
};
Uri images = MediaStore.Files.getContentUri("external");
String selection, selectionArgs[];
selection =
MediaStore.Files.FileColumns.MEDIA_TYPE
+ "=? and "
+ MediaStore.Files.FileColumns.PARENT
+ "=?";
selectionArgs =
new String[] {
String.valueOf(MediaStore.Files.FileColumns.MEDIA_TYPE_IMAGE), String.valueOf(albumId)
};
Cursor cur =
context
.getContentResolver()
.query(
images,
projection,
selection,
selectionArgs,
" " + MediaStore.Images.Media.DATE_TAKEN + " DESC " + limit);
if (cur != null) {
if (cur.moveToFirst()) do list.add(new Media(cur)); while (cur.moveToNext());
cur.close();
}
return list;
}
public static long getAlbumId(Context context, String mediaPath) {
long id = -1;
Cursor cur =
context
.getContentResolver()
.query(
MediaStore.Files.getContentUri("external"),
new String[] {MediaStore.Files.FileColumns.PARENT},
MediaStore.Files.FileColumns.DATA + "=?",
new String[] {mediaPath},
null);
if (cur != null && cur.moveToNext()) {
id = cur.getLong(0);
cur.close();
}
return id;
}
@TestOnly
private String getThumbnailPath(Context context, long id) {
Cursor cursor =
MediaStore.Images.Thumbnails.queryMiniThumbnail(
context.getContentResolver(),
id,
MediaStore.Images.Thumbnails.MINI_KIND,
new String[] {MediaStore.Images.Thumbnails.DATA});
if (cursor.moveToFirst())
return cursor.getString(cursor.getColumnIndex(MediaStore.Images.Thumbnails.DATA));
return null;
}
}
| 1 | 13,744 | Why changes in this file? | fossasia-phimpme-android | java |
@@ -26,6 +26,7 @@ public final class CommentPatterns {
Pattern.compile("\\[([^\\]]+)\\]\\((\\p{Alpha}+:[^\\)]+)\\)");
public static final Pattern CLOUD_LINK_PATTERN =
Pattern.compile("\\[([^\\]]+)\\]\\(((?!\\p{Alpha}+:)[^\\)]+)\\)");
- public static final Pattern PROTO_LINK_PATTERN = Pattern.compile("\\[([^\\]]+)\\]\\[[^\\]]*\\]");
+ public static final Pattern PROTO_LINK_PATTERN =
+ Pattern.compile("\\[([^\\]]+)\\]\\[([A-Za-z_][A-Za-z_.0-9]*)*\\]");
public static final Pattern HEADLINE_PATTERN = Pattern.compile("^#+", Pattern.MULTILINE);
} | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen;
import java.util.regex.Pattern;
/**
* Collects common regular expressions for formatting source comments to follow language-idiomatic
* style.
*/
public final class CommentPatterns {
public static final Pattern BACK_QUOTE_PATTERN = Pattern.compile("(?<!`)``?(?!`)");
public static final Pattern ABSOLUTE_LINK_PATTERN =
Pattern.compile("\\[([^\\]]+)\\]\\((\\p{Alpha}+:[^\\)]+)\\)");
public static final Pattern CLOUD_LINK_PATTERN =
Pattern.compile("\\[([^\\]]+)\\]\\(((?!\\p{Alpha}+:)[^\\)]+)\\)");
public static final Pattern PROTO_LINK_PATTERN = Pattern.compile("\\[([^\\]]+)\\]\\[[^\\]]*\\]");
public static final Pattern HEADLINE_PATTERN = Pattern.compile("^#+", Pattern.MULTILINE);
}
| 1 | 19,430 | This doesn't look right. I think what you want here is `\\[([^\\]]+)\\]\\[[A-Za-z_][A-Za-z_.0-9]*]*\\]`. | googleapis-gapic-generator | java |
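The review above questions the regular expression chosen for PROTO_LINK_PATTERN. As a rough, hedged illustration of what changes between the original pattern and the one proposed in the patch, the sketch below compiles both and runs them over a few sample strings (the class name and the samples are assumptions made for this example and are not part of the gapic-generator sources). The reviewer's counter-proposal appears to differ mainly in requiring at least one identifier character inside the second pair of brackets.

import java.util.regex.Pattern;

public class ProtoLinkPatternCheck {
    // Pattern before the patch: any (possibly empty) text is accepted as the reference.
    private static final Pattern ORIGINAL =
        Pattern.compile("\\[([^\\]]+)\\]\\[[^\\]]*\\]");
    // Pattern proposed in the patch: the reference must look like a dotted identifier,
    // but the outer '*' still allows it to be empty.
    private static final Pattern PATCHED =
        Pattern.compile("\\[([^\\]]+)\\]\\[([A-Za-z_][A-Za-z_.0-9]*)*\\]");

    public static void main(String[] args) {
        String[] samples = {
            "[Foo][google.example.v1.Foo]",  // fully qualified proto reference
            "[Foo][]",                       // empty reference
            "[Foo][not a valid identifier]"  // reference that is not a proto identifier
        };
        for (String s : samples) {
            System.out.printf("%-32s original=%b patched=%b%n",
                s, ORIGINAL.matcher(s).matches(), PATCHED.matcher(s).matches());
        }
    }
}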